chore: define common aliases for k8s packages (#8672)
* chore: define common alias for k8s packages

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

* Update .golangci.yaml

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

* Update .golangci.yaml

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

* Update .golangci.yaml

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

---------

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
parent f0fde6e1d4
commit c6a420bd3a
@@ -132,6 +132,23 @@ linters:
   disable:
     - shadow
   disable-all: false
 
+  importas:
+    alias:
+      - alias: appsv1api
+        pkg: k8s.io/api/apps/v1
+      - alias: corev1api
+        pkg: k8s.io/api/core/v1
+      - alias: rbacv1
+        pkg: k8s.io/api/rbac/v1
+      - alias: apierrors
+        pkg: k8s.io/apimachinery/pkg/api/errors
+      - alias: apiextv1
+        pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
+      - alias: metav1
+        pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+      - alias: storagev1api
+        pkg: k8s.io/api/storage/v1
+
   lll:
     # max line length, lines longer will be reported. Default is 120.
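With the importas section above in place, golangci-lint rejects any import of those packages under a different name. A minimal sketch of a conforming file (the helper below is hypothetical; only the aliases come from the config):

package example

import (
    appsv1api "k8s.io/api/apps/v1"
    corev1api "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newDeployment is a hypothetical helper showing the enforced aliases in use.
func newDeployment(name, ns string) *appsv1api.Deployment {
    return &appsv1api.Deployment{
        ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
        Spec: appsv1api.DeploymentSpec{
            Template: corev1api.PodTemplateSpec{
                Spec: corev1api.PodSpec{},
            },
        },
    }
}

Running golangci-lint run after this change flags any file that still imports, say, k8s.io/api/core/v1 as v1 or corev1 — which is exactly what the renames in the rest of this diff clean up.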
@@ -21,7 +21,7 @@ import (
     "testing"
 
     "github.com/stretchr/testify/require"
-    corev1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
 
     "github.com/vmware-tanzu/velero/pkg/builder"
     velerotest "github.com/vmware-tanzu/velero/pkg/test"

@@ -32,8 +32,8 @@ func TestNamespacedFileStore(t *testing.T) {
         name             string
         namespace        string
         fsRoot           string
-        secrets          []*corev1.Secret
-        secretSelector   *corev1.SecretKeySelector
+        secrets          []*corev1api.Secret
+        secretSelector   *corev1api.SecretKeySelector
         wantErr          string
         expectedPath     string
         expectedContents string

@@ -48,7 +48,7 @@ func TestNamespacedFileStore(t *testing.T) {
             namespace:      "ns1",
             fsRoot:         "/tmp/credentials",
             secretSelector: builder.ForSecretKeySelector("credential", "key2").Result(),
-            secrets: []*corev1.Secret{
+            secrets: []*corev1api.Secret{
                 builder.ForSecret("ns1", "credential").Data(map[string][]byte{
                     "key1": []byte("ns1-secretdata1"),
                     "key2": []byte("ns1-secretdata2"),
@@ -4,7 +4,7 @@ package mocks
 
 import (
     mock "github.com/stretchr/testify/mock"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
 )
 
 // FileStore is an autogenerated mock type for the FileStore type

@@ -13,18 +13,18 @@ type FileStore struct {
 }
 
 // Path provides a mock function with given fields: selector
-func (_m *FileStore) Path(selector *v1.SecretKeySelector) (string, error) {
+func (_m *FileStore) Path(selector *corev1api.SecretKeySelector) (string, error) {
     ret := _m.Called(selector)
 
     var r0 string
-    if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok {
+    if rf, ok := ret.Get(0).(func(*corev1api.SecretKeySelector) string); ok {
         r0 = rf(selector)
     } else {
         r0 = ret.Get(0).(string)
     }
 
     var r1 error
-    if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok {
+    if rf, ok := ret.Get(1).(func(*corev1api.SecretKeySelector) error); ok {
         r1 = rf(selector)
     } else {
         r1 = ret.Error(1)
@@ -4,7 +4,7 @@ package mocks
 
 import (
     mock "github.com/stretchr/testify/mock"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
 )
 
 // SecretStore is an autogenerated mock type for the SecretStore type

@@ -13,18 +13,18 @@ type SecretStore struct {
 }
 
 // Get provides a mock function with given fields: selector
-func (_m *SecretStore) Get(selector *v1.SecretKeySelector) (string, error) {
+func (_m *SecretStore) Get(selector *corev1api.SecretKeySelector) (string, error) {
     ret := _m.Called(selector)
 
     var r0 string
-    if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok {
+    if rf, ok := ret.Get(0).(func(*corev1api.SecretKeySelector) string); ok {
         r0 = rf(selector)
     } else {
         r0 = ret.Get(0).(string)
     }
 
     var r1 error
-    if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok {
+    if rf, ok := ret.Get(1).(func(*corev1api.SecretKeySelector) error); ok {
         r1 = rf(selector)
     } else {
         r1 = ret.Error(1)
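Both files are standard mockery-generated testify mocks, so tests drive them through On/Return as usual. A minimal hypothetical sketch against the FileStore mock above:

package mocks

import (
    "testing"

    "github.com/stretchr/testify/require"
    corev1api "k8s.io/api/core/v1"
)

// TestFileStorePathStub is illustrative only: it stubs Path and checks that
// the canned return values flow back through the generated mock.
func TestFileStorePathStub(t *testing.T) {
    fs := new(FileStore)
    sel := &corev1api.SecretKeySelector{Key: "cloud"}
    fs.On("Path", sel).Return("/tmp/credentials/cloud", nil)

    path, err := fs.Path(sel)
    require.NoError(t, err)
    require.Equal(t, "/tmp/credentials/cloud", path)
    fs.AssertExpectations(t)
}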
@@ -22,7 +22,7 @@ import (
     "time"
 
     "github.com/sirupsen/logrus"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/tools/cache"

@@ -37,7 +37,7 @@ type WaitExecHookHandler interface {
     HandleHooks(
         ctx context.Context,
         log logrus.FieldLogger,
-        pod *v1.Pod,
+        pod *corev1api.Pod,
         byContainer map[string][]PodExecRestoreHook,
         multiHookTracker *MultiHookTracker,
         restoreName string,

@@ -73,7 +73,7 @@ var _ WaitExecHookHandler = &DefaultWaitExecHookHandler{}
 func (e *DefaultWaitExecHookHandler) HandleHooks(
     ctx context.Context,
     log logrus.FieldLogger,
-    pod *v1.Pod,
+    pod *corev1api.Pod,
     byContainer map[string][]PodExecRestoreHook,
     multiHookTracker *MultiHookTracker,
     restoreName string,

@@ -117,7 +117,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
     // When a container is observed running and its hooks are executed, the container is deleted
     // from the byContainer map. When the map is empty the watch is ended.
     handler := func(newObj any) {
-        newPod, ok := newObj.(*v1.Pod)
+        newPod, ok := newObj.(*corev1api.Pod)
         if !ok {
             return
         }

@@ -128,7 +128,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
             },
         )
 
-        if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
+        if newPod.Status.Phase == corev1api.PodSucceeded || newPod.Status.Phase == corev1api.PodFailed {
             err := fmt.Errorf("pod entered phase %s before some post-restore exec hooks ran", newPod.Status.Phase)
             podLog.Warning(err)
             cancel()

@@ -265,7 +265,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
     return errors
 }
 
-func podHasContainer(pod *v1.Pod, containerName string) bool {
+func podHasContainer(pod *corev1api.Pod, containerName string) bool {
     if pod == nil {
         return false
     }

@@ -278,7 +278,7 @@ func podHasContainer(pod *v1.Pod, containerName string) bool {
     return false
 }
 
-func isContainerUp(pod *v1.Pod, containerName string, hooks []PodExecRestoreHook) bool {
+func isContainerUp(pod *corev1api.Pod, containerName string, hooks []PodExecRestoreHook) bool {
     if pod == nil {
         return false
     }
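The handler above receives untyped objects from the informer and relies on a type assertion plus a terminal-phase check. A self-contained sketch of that pattern under the new alias (names here are illustrative, not Velero's):

package main

import (
    "fmt"

    corev1api "k8s.io/api/core/v1"
)

// podFromEvent mirrors the assertion in the handler: non-Pod objects are ignored.
func podFromEvent(obj any) (*corev1api.Pod, bool) {
    pod, ok := obj.(*corev1api.Pod)
    return pod, ok
}

// isTerminal reports whether no further exec hooks can run in the pod.
func isTerminal(pod *corev1api.Pod) bool {
    return pod.Status.Phase == corev1api.PodSucceeded || pod.Status.Phase == corev1api.PodFailed
}

func main() {
    p := &corev1api.Pod{Status: corev1api.PodStatus{Phase: corev1api.PodFailed}}
    if pod, ok := podFromEvent(p); ok {
        fmt.Println(isTerminal(pod)) // true
    }
}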
@@ -25,7 +25,7 @@ import (
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/mock"
     "github.com/stretchr/testify/require"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/runtime"

@@ -52,18 +52,18 @@ func TestWaitExecHandleHooks(t *testing.T) {
     type change struct {
         // delta to wait since last change applied or pod added
         wait    time.Duration
-        updated *v1.Pod
+        updated *corev1api.Pod
     }
     type expectedExecution struct {
         hook  *velerov1api.ExecHook
         name  string
         error error
-        pod   *v1.Pod
+        pod   *corev1api.Pod
     }
     tests := []struct {
         name string
         // Used as argument to HandleHooks and first state added to ListerWatcher
-        initialPod         *v1.Pod
+        initialPod         *corev1api.Pod
         groupResource      string
         byContainer        map[string][]PodExecRestoreHook
         expectedExecutions []expectedExecution

@@ -83,13 +83,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -128,13 +128,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -152,13 +152,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -197,13 +197,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -221,13 +221,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -266,13 +266,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -290,13 +290,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -335,13 +335,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -359,13 +359,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -376,13 +376,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             name:          "should return no error when hook from spec executes successfully",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -408,13 +408,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             },
             pod: builder.ForPod("default", "my-pod").
                 ObjectMeta(builder.WithResourceVersion("1")).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -425,13 +425,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             name:          "should return error when spec hook with wait timeout expires with OnError mode Continue",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -456,13 +456,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             name:          "should return an error when spec hook with wait timeout expires with OnError mode Fail",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -487,13 +487,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             name:          "should return an error when shared hooks context is canceled before spec hook with OnError mode Fail executes",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -519,13 +519,13 @@ func TestWaitExecHandleHooks(t *testing.T) {
             expectedErrors: []error{errors.New("hook my-hook-1 in container container1 in pod default/my-pod not executed: context deadline exceeded")},
             groupResource:  "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -549,23 +549,23 @@ func TestWaitExecHandleHooks(t *testing.T) {
             name:          "should return no error with 2 spec hooks in 2 different containers, 1st container starts running after 10ms, 2nd container after 20ms, both succeed",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container2",
                 }).
                 // initially both are waiting
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container2",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -602,23 +602,23 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 error: nil,
                 pod: builder.ForPod("default", "my-pod").
                     ObjectMeta(builder.WithResourceVersion("2")).
-                    Containers(&v1.Container{
+                    Containers(&corev1api.Container{
                         Name: "container1",
                     }).
-                    Containers(&v1.Container{
+                    Containers(&corev1api.Container{
                         Name: "container2",
                     }).
-                    ContainerStatuses(&v1.ContainerStatus{
+                    ContainerStatuses(&corev1api.ContainerStatus{
                         Name: "container1",
-                        State: v1.ContainerState{
-                            Running: &v1.ContainerStateRunning{},
+                        State: corev1api.ContainerState{
+                            Running: &corev1api.ContainerStateRunning{},
                         },
                     }).
                     // container 2 is still waiting when the first hook executes in container1
-                    ContainerStatuses(&v1.ContainerStatus{
+                    ContainerStatuses(&corev1api.ContainerStatus{
                         Name: "container2",
-                        State: v1.ContainerState{
-                            Waiting: &v1.ContainerStateWaiting{},
+                        State: corev1api.ContainerState{
+                            Waiting: &corev1api.ContainerStateWaiting{},
                         },
                     }).
                     Result(),

@@ -632,22 +632,22 @@ func TestWaitExecHandleHooks(t *testing.T) {
                 error: nil,
                 pod: builder.ForPod("default", "my-pod").
                     ObjectMeta(builder.WithResourceVersion("3")).
-                    Containers(&v1.Container{
+                    Containers(&corev1api.Container{
                         Name: "container1",
                     }).
-                    Containers(&v1.Container{
+                    Containers(&corev1api.Container{
                         Name: "container2",
                     }).
-                    ContainerStatuses(&v1.ContainerStatus{
+                    ContainerStatuses(&corev1api.ContainerStatus{
                         Name: "container1",
-                        State: v1.ContainerState{
-                            Running: &v1.ContainerStateRunning{},
+                        State: corev1api.ContainerState{
+                            Running: &corev1api.ContainerStateRunning{},
                         },
                     }).
-                    ContainerStatuses(&v1.ContainerStatus{
+                    ContainerStatuses(&corev1api.ContainerStatus{
                         Name: "container2",
-                        State: v1.ContainerState{
-                            Running: &v1.ContainerStateRunning{},
+                        State: corev1api.ContainerState{
+                            Running: &corev1api.ContainerStateRunning{},
                         },
                     }).
                     Result(),

@@ -659,22 +659,22 @@ func TestWaitExecHandleHooks(t *testing.T) {
                     wait: 10 * time.Millisecond,
                     updated: builder.ForPod("default", "my-pod").
                         ObjectMeta(builder.WithResourceVersion("2")).
-                        Containers(&v1.Container{
+                        Containers(&corev1api.Container{
                             Name: "container1",
                         }).
-                        Containers(&v1.Container{
+                        Containers(&corev1api.Container{
                             Name: "container2",
                         }).
-                        ContainerStatuses(&v1.ContainerStatus{
+                        ContainerStatuses(&corev1api.ContainerStatus{
                             Name: "container1",
-                            State: v1.ContainerState{
-                                Running: &v1.ContainerStateRunning{},
+                            State: corev1api.ContainerState{
+                                Running: &corev1api.ContainerStateRunning{},
                             },
                         }).
-                        ContainerStatuses(&v1.ContainerStatus{
+                        ContainerStatuses(&corev1api.ContainerStatus{
                             Name: "container2",
-                            State: v1.ContainerState{
-                                Waiting: &v1.ContainerStateWaiting{},
+                            State: corev1api.ContainerState{
+                                Waiting: &corev1api.ContainerStateWaiting{},
                             },
                         }).
                         Result(),

@@ -684,22 +684,22 @@ func TestWaitExecHandleHooks(t *testing.T) {
                     wait: 10 * time.Millisecond,
                     updated: builder.ForPod("default", "my-pod").
                         ObjectMeta(builder.WithResourceVersion("3")).
-                        Containers(&v1.Container{
+                        Containers(&corev1api.Container{
                             Name: "container1",
                         }).
-                        Containers(&v1.Container{
+                        Containers(&corev1api.Container{
                             Name: "container2",
                         }).
-                        ContainerStatuses(&v1.ContainerStatus{
+                        ContainerStatuses(&corev1api.ContainerStatus{
                             Name: "container1",
-                            State: v1.ContainerState{
-                                Running: &v1.ContainerStateRunning{},
+                            State: corev1api.ContainerState{
+                                Running: &corev1api.ContainerStateRunning{},
                             },
                         }).
-                        ContainerStatuses(&v1.ContainerStatus{
+                        ContainerStatuses(&corev1api.ContainerStatus{
                             Name: "container2",
-                            State: v1.ContainerState{
-                                Running: &v1.ContainerStateRunning{},
+                            State: corev1api.ContainerState{
+                                Running: &corev1api.ContainerStateRunning{},
                             },
                         }).
                         Result(),

@@ -758,7 +758,7 @@ func TestWaitExecHandleHooks(t *testing.T) {
 func TestPodHasContainer(t *testing.T) {
     tests := []struct {
         name      string
-        pod       *v1.Pod
+        pod       *corev1api.Pod
         container string
         expect    bool
     }{

@@ -767,7 +767,7 @@ func TestPodHasContainer(t *testing.T) {
             expect:    true,
             container: "container1",
             pod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
                 Result(),

@@ -777,7 +777,7 @@ func TestPodHasContainer(t *testing.T) {
             expect:    false,
             container: "container1",
             pod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container2",
                 }).
                 Result(),

@@ -794,7 +794,7 @@ func TestPodHasContainer(t *testing.T) {
 func TestIsContainerUp(t *testing.T) {
     tests := []struct {
         name      string
-        pod       *v1.Pod
+        pod       *corev1api.Pod
         container string
         expect    bool
         hooks     []PodExecRestoreHook

@@ -804,10 +804,10 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    true,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -818,10 +818,10 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    false,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                     Ready: false,
                 }).

@@ -839,10 +839,10 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    true,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                     Ready: true,
                 }).

@@ -860,9 +860,9 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    false,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name:  "container1",
-                    State: v1.ContainerState{},
+                    State: corev1api.ContainerState{},
                 }).
                 Result(),
             hooks: []PodExecRestoreHook{},

@@ -872,10 +872,10 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    false,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -886,16 +886,16 @@ func TestIsContainerUp(t *testing.T) {
             container: "container1",
             expect:    true,
             pod: builder.ForPod("default", "my-pod").
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container0",
-                    State: v1.ContainerState{
-                        Terminated: &v1.ContainerStateTerminated{},
+                    State: corev1api.ContainerState{
+                        Terminated: &corev1api.ContainerStateTerminated{},
                     },
                 },
-                    &v1.ContainerStatus{
+                    &corev1api.ContainerStatus{
                         Name: "container1",
-                        State: v1.ContainerState{
-                            Running: &v1.ContainerStateRunning{},
+                        State: corev1api.ContainerState{
+                            Running: &corev1api.ContainerStateRunning{},
                         },
                     }).
                 Result(),

@@ -1003,7 +1003,7 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
         hook  *velerov1api.ExecHook
         name  string
         error error
-        pod   *v1.Pod
+        pod   *corev1api.Pod
     }
 
     hookTracker1 := NewMultiHookTracker()

@@ -1021,7 +1021,7 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
 
     tests1 := []struct {
         name               string
-        initialPod         *v1.Pod
+        initialPod         *corev1api.Pod
         groupResource      string
         byContainer        map[string][]PodExecRestoreHook
         expectedExecutions []expectedExecution

@@ -1038,13 +1038,13 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -1083,13 +1083,13 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
                 podRestoreHookTimeoutAnnotationKey, "1s",
                 podRestoreHookWaitTimeoutAnnotationKey, "1m",
             )).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Running: &v1.ContainerStateRunning{},
+                    State: corev1api.ContainerState{
+                        Running: &corev1api.ContainerStateRunning{},
                     },
                 }).
                 Result(),

@@ -1102,13 +1102,13 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
             name:          "a hook with OnError mode Fail failed to execute",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -1133,13 +1133,13 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
             name:          "a hook with OnError mode Continue failed to execute",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -1164,23 +1164,23 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
             name:          "two hooks with OnError mode Continue failed to execute",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container2",
                 }).
                 // initially both are waiting
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container2",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),

@@ -1217,13 +1217,13 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
             name:          "a hook was recorded before added to tracker",
             groupResource: "pods",
             initialPod: builder.ForPod("default", "my-pod").
-                Containers(&v1.Container{
+                Containers(&corev1api.Container{
                     Name: "container1",
                 }).
-                ContainerStatuses(&v1.ContainerStatus{
+                ContainerStatuses(&corev1api.ContainerStatus{
                     Name: "container1",
-                    State: v1.ContainerState{
-                        Waiting: &v1.ContainerStateWaiting{},
+                    State: corev1api.ContainerState{
+                        Waiting: &corev1api.ContainerStateWaiting{},
                     },
                 }).
                 Result(),
@@ -23,7 +23,7 @@ import (
     "github.com/gobwas/glob"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/labels"

@@ -63,7 +63,7 @@ type ResourceModifiers struct {
     ResourceModifierRules []ResourceModifierRule `json:"resourceModifierRules"`
 }
 
-func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error) {
+func GetResourceModifiersFromConfig(cm *corev1api.ConfigMap) (*ResourceModifiers, error) {
     if cm == nil {
         return nil, fmt.Errorf("could not parse config from nil configmap")
     }
@@ -21,7 +21,7 @@ import (
 
     "github.com/sirupsen/logrus"
     "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"

@@ -31,7 +31,7 @@ import (
 )
 
 func TestGetResourceModifiersFromConfig(t *testing.T) {
-    cm1 := &v1.ConfigMap{
+    cm1 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -64,7 +64,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
             },
         },
     }
-    cm2 := &v1.ConfigMap{
+    cm2 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -99,7 +99,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm3 := &v1.ConfigMap{
+    cm3 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -109,7 +109,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm4 := &v1.ConfigMap{
+    cm4 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -135,7 +135,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm5 := &v1.ConfigMap{
+    cm5 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -170,7 +170,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm6 := &v1.ConfigMap{
+    cm6 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -199,7 +199,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm7 := &v1.ConfigMap{
+    cm7 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -228,7 +228,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
         },
     }
 
-    cm8 := &v1.ConfigMap{
+    cm8 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -256,7 +256,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
             },
         },
     }
-    cm9 := &v1.ConfigMap{
+    cm9 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -285,7 +285,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
             },
         },
     }
-    cm10 := &v1.ConfigMap{
+    cm10 := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -316,7 +316,7 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
     }
 
     type args struct {
-        cm *v1.ConfigMap
+        cm *corev1api.ConfigMap
     }
     tests := []struct {
         name string
@@ -22,7 +22,7 @@ import (
 
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     crclient "sigs.k8s.io/controller-runtime/pkg/client"
 
     velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -185,7 +185,7 @@ func GetResourcePoliciesFromBackup(
 ) (resourcePolicies *Policies, err error) {
     if backup.Spec.ResourcePolicy != nil &&
         strings.EqualFold(backup.Spec.ResourcePolicy.Kind, ConfigmapRefType) {
-        policiesConfigMap := &v1.ConfigMap{}
+        policiesConfigMap := &corev1api.ConfigMap{}
         err = client.Get(
             context.Background(),
             crclient.ObjectKey{Namespace: backup.Namespace, Name: backup.Spec.ResourcePolicy.Name},

@@ -214,7 +214,7 @@ func GetResourcePoliciesFromBackup(
     return resourcePolicies, nil
 }
 
-func getResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
+func getResourcePoliciesFromConfig(cm *corev1api.ConfigMap) (*Policies, error) {
     if cm == nil {
         return nil, fmt.Errorf("could not parse config from nil configmap")
     }
@@ -19,7 +19,7 @@ import (
     "testing"
 
     "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
+    corev1api "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -328,7 +328,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
 
 func TestGetResourcePoliciesFromConfig(t *testing.T) {
     // Create a test ConfigMap
-    cm := &v1.ConfigMap{
+    cm := &corev1api.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "test-configmap",
             Namespace: "test-namespace",

@@ -418,9 +418,9 @@ func TestGetMatchAction(t *testing.T) {
     testCases := []struct {
         name     string
         yamlData string
-        vol      *v1.PersistentVolume
-        podVol   *v1.Volume
-        pvc      *v1.PersistentVolumeClaim
+        vol      *corev1api.PersistentVolume
+        podVol   *corev1api.Volume
+        pvc      *corev1api.PersistentVolumeClaim
         skip     bool
     }{
         {

@@ -431,10 +431,10 @@ volumePolicies:
     csi: {}
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
                     }},
             },
             skip: true,

@@ -447,10 +447,10 @@ volumePolicies:
     csi: {}
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     }},
             },
             skip: false,

@@ -464,10 +464,10 @@ volumePolicies:
       driver: files.csi.driver
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
                     }},
             },
             skip: false,

@@ -481,10 +481,10 @@ volumePolicies:
      driver: files.csi.driver
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver"},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "files.csi.driver"},
                     }},
             },
             skip: true,

@@ -501,10 +501,10 @@ volumePolicies:
   action:
     type: skip
 `,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
                     }},
             },
             skip: false,

@@ -521,10 +521,10 @@ volumePolicies:
   action:
     type: skip
 `,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
                     }},
             },
             skip: false,

@@ -541,10 +541,10 @@ volumePolicies:
   action:
     type: skip
 `,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
                     }},
             },
             skip: true,

@@ -565,10 +565,10 @@ volumePolicies:
         protocol: nfs
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "disks.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
                     }},
             },
             skip: true,

@@ -589,10 +589,10 @@ volumePolicies:
         protocol: nfs
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
                     }},
             },
             skip: false,

@@ -613,10 +613,10 @@ volumePolicies:
         protocol: nfs
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1", "protocol": "nfs"}},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1", "protocol": "nfs"}},
                     }},
             },
             skip: true,

@@ -629,13 +629,13 @@ volumePolicies:
     capacity: "0,100Gi"
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        CSI: &v1.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        CSI: &corev1api.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
                     }},
             },
             skip: true,

@@ -648,10 +648,10 @@ volumePolicies:
     nfs: {}
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        NFS: &v1.NFSVolumeSource{Server: "192.168.1.20"},
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        NFS: &corev1api.NFSVolumeSource{Server: "192.168.1.20"},
                     }},
             },
             skip: true,

@@ -664,13 +664,13 @@ volumePolicies:
     capacity: "0,100Gi"
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        NFS: &v1.NFSVolumeSource{Server: "192.168.1.20"},
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        NFS: &corev1api.NFSVolumeSource{Server: "192.168.1.20"},
                     },
                 },
             },

@@ -685,10 +685,10 @@ volumePolicies:
     nfs: {}
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
                 },
             },

@@ -705,13 +705,13 @@ volumePolicies:
       - hostPath
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        HostPath: &v1.HostPathVolumeSource{Path: "/mnt/data"},
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        HostPath: &corev1api.HostPathVolumeSource{Path: "/mnt/data"},
                     },
                 },
             },

@@ -727,13 +727,13 @@ volumePolicies:
      - local
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+            vol: &corev1api.PersistentVolume{
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{
-                        HostPath: &v1.HostPathVolumeSource{Path: "/mnt/data"},
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{
+                        HostPath: &corev1api.HostPathVolumeSource{Path: "/mnt/data"},
                     },
                 },
             },

@@ -749,22 +749,22 @@ volumePolicies:
       environment: production
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
+            vol: &corev1api.PersistentVolume{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pv-1",
                 },
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{},
-                    ClaimRef: &v1.ObjectReference{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{},
+                    ClaimRef: &corev1api.ObjectReference{
                         Namespace: "default",
                         Name:      "pvc-1",
                     },
                 },
             },
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-1",

@@ -783,22 +783,22 @@ volumePolicies:
       environment: production
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
+            vol: &corev1api.PersistentVolume{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pv-1",
                 },
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{},
-                    ClaimRef: &v1.ObjectReference{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{},
+                    ClaimRef: &corev1api.ObjectReference{
                         Namespace: "default",
                         Name:      "pvc-1",
                     },
                 },
             },
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-1",

@@ -818,22 +818,22 @@ volumePolicies:
       app: frontend
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
+            vol: &corev1api.PersistentVolume{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pv-1",
                 },
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{},
-                    ClaimRef: &v1.ObjectReference{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{},
+                    ClaimRef: &corev1api.ObjectReference{
                         Namespace: "default",
                         Name:      "pvc-1",
                     },
                 },
             },
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-1",

@@ -852,22 +852,22 @@ volumePolicies:
       environment: production
   action:
     type: skip`,
-            vol: &v1.PersistentVolume{
+            vol: &corev1api.PersistentVolume{
                 ObjectMeta: metav1.ObjectMeta{
                     Name: "pv-2",
                 },
-                Spec: v1.PersistentVolumeSpec{
-                    Capacity: v1.ResourceList{
-                        v1.ResourceStorage: resource.MustParse("1Gi"),
+                Spec: corev1api.PersistentVolumeSpec{
+                    Capacity: corev1api.ResourceList{
+                        corev1api.ResourceStorage: resource.MustParse("1Gi"),
                     },
-                    PersistentVolumeSource: v1.PersistentVolumeSource{},
-                    ClaimRef: &v1.ObjectReference{
+                    PersistentVolumeSource: corev1api.PersistentVolumeSource{},
+                    ClaimRef: &corev1api.ObjectReference{
                         Namespace: "default",
                         Name:      "pvc-2",
                     },
                 },
             },
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-1",

@@ -886,8 +886,8 @@ volumePolicies:
   action:
     type: skip`,
             vol:    nil,
-            podVol: &v1.Volume{Name: "pod-vol-1"},
-            pvc: &v1.PersistentVolumeClaim{
+            podVol: &corev1api.Volume{Name: "pod-vol-1"},
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-1",

@@ -906,8 +906,8 @@ volumePolicies:
   action:
     type: skip`,
             vol:    nil,
-            podVol: &v1.Volume{Name: "pod-vol-2"},
-            pvc: &v1.PersistentVolumeClaim{
+            podVol: &corev1api.Volume{Name: "pod-vol-2"},
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-2",

@@ -926,8 +926,8 @@ volumePolicies:
   action:
     type: skip`,
             vol:    nil,
-            podVol: &v1.Volume{Name: "pod-vol-3"},
-            pvc: &v1.PersistentVolumeClaim{
+            podVol: &corev1api.Volume{Name: "pod-vol-3"},
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-3",

@@ -947,8 +947,8 @@ volumePolicies:
   action:
     type: skip`,
             vol:    nil,
-            podVol: &v1.Volume{Name: "pod-vol-4"},
-            pvc: &v1.PersistentVolumeClaim{
+            podVol: &corev1api.Volume{Name: "pod-vol-4"},
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Namespace: "default",
                     Name:      "pvc-4",

@@ -1032,13 +1032,13 @@ func TestGetMatchAction_Errors(t *testing.T) {
 func TestParsePVC(t *testing.T) {
     tests := []struct {
         name           string
-        pvc            *v1.PersistentVolumeClaim
+        pvc            *corev1api.PersistentVolumeClaim
         expectedLabels map[string]string
         expectErr      bool
     }{
         {
             name: "valid PVC with labels",
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Labels: map[string]string{"env": "prod"},
                 },

@@ -1048,7 +1048,7 @@ func TestParsePVC(t *testing.T) {
         },
         {
             name: "valid PVC with empty labels",
-            pvc: &v1.PersistentVolumeClaim{
+            pvc: &corev1api.PersistentVolumeClaim{
                 ObjectMeta: metav1.ObjectMeta{
                     Labels: map[string]string{},
                 },

@@ -1058,7 +1058,7 @@ func TestParsePVC(t *testing.T) {
         },
         {
             name:           "nil PVC pointer",
-            pvc:            (*v1.PersistentVolumeClaim)(nil),
+            pvc:            (*corev1api.PersistentVolumeClaim)(nil),
             expectedLabels: nil,
             expectErr:      false,
         },
@@ -1,18 +1,18 @@
package resourcepolicies

import (
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
)

// VolumeFilterData bundles the volume data needed for volume policy filtering
type VolumeFilterData struct {
-PersistentVolume *corev1.PersistentVolume
-PodVolume *corev1.Volume
-PVC *corev1.PersistentVolumeClaim
+PersistentVolume *corev1api.PersistentVolume
+PodVolume *corev1api.Volume
+PVC *corev1api.PersistentVolumeClaim
}

// NewVolumeFilterData constructs a new VolumeFilterData instance.
-func NewVolumeFilterData(pv *corev1.PersistentVolume, podVol *corev1.Volume, pvc *corev1.PersistentVolumeClaim) VolumeFilterData {
+func NewVolumeFilterData(pv *corev1api.PersistentVolume, podVol *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim) VolumeFilterData {
return VolumeFilterData{
PersistentVolume: pv,
PodVolume: podVol,
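A short usage sketch of the constructor above under the new alias; the values are invented for illustration:

// Hypothetical values; only the corev1api alias and the NewVolumeFilterData
// signature come from the file above.
pv := &corev1api.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "pv-1"}}
podVol := &corev1api.Volume{Name: "data"}
vfd := NewVolumeFilterData(pv, podVol, nil)
_ = vfd.PVC // fields are plain pointers, so nil simply means "not provided"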
@@ -4,31 +4,31 @@ import (
"testing"

"github.com/stretchr/testify/assert"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestNewVolumeFilterData(t *testing.T) {
testCases := []struct {
name string
-pv *corev1.PersistentVolume
-podVol *corev1.Volume
-pvc *corev1.PersistentVolumeClaim
+pv *corev1api.PersistentVolume
+podVol *corev1api.Volume
+pvc *corev1api.PersistentVolumeClaim
expectedPVName string
expectedPodName string
expectedPVCName string
}{
{
name: "all provided",
-pv: &corev1.PersistentVolume{
+pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv-test",
},
},
-podVol: &corev1.Volume{
+podVol: &corev1api.Volume{
Name: "pod-vol-test",
},
-pvc: &corev1.PersistentVolumeClaim{
+pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-test",
},
@@ -39,7 +39,7 @@ func TestNewVolumeFilterData(t *testing.T) {
},
{
name: "only PV provided",
-pv: &corev1.PersistentVolume{
+pv: &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv-only",
},
@@ -53,7 +53,7 @@ func TestNewVolumeFilterData(t *testing.T) {
{
name: "only PodVolume provided",
pv: nil,
-podVol: &corev1.Volume{
+podVol: &corev1api.Volume{
Name: "pod-only",
},
pvc: nil,
@@ -65,7 +65,7 @@ func TestNewVolumeFilterData(t *testing.T) {
name: "only PVC provided",
pv: nil,
podVol: nil,
-pvc: &corev1.PersistentVolumeClaim{
+pvc: &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-only",
},

@@ -23,7 +23,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -42,7 +42,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
name string
inputObj runtime.Object
groupResource schema.GroupResource
-pod *corev1.Pod
+pod *corev1api.Pod
resourcePolicies *resourcepolicies.ResourcePolicies
snapshotVolumesFlag *bool
defaultVolumesToFSBackup bool
@@ -139,10 +139,10 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
inputObj: builder.ForPersistentVolume("example-pv").StorageClass("gp3-csi").ClaimRef("ns", "pvc-1").Result(),
groupResource: kuberesource.PersistentVolumes,
pod: builder.ForPod("ns", "pod-1").Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "volume",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -173,10 +173,10 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
pod: builder.ForPod("ns", "pod-1").
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToExcludeAnnotation, "volume")).
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "volume",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -207,10 +207,10 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
pod: builder.ForPod("ns", "pod-1").
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToBackupAnnotation, "volume")).
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "volume",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -240,10 +240,10 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
groupResource: kuberesource.PersistentVolumes,
pod: builder.ForPod("ns", "pod-1").
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "volume",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -299,7 +299,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
}

objs := []runtime.Object{
-&corev1.PersistentVolumeClaim{
+&corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns",
Name: "pvc-1",
@@ -348,16 +348,16 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) {
func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
testCases := []struct {
name string
-vol corev1.Volume
+vol corev1api.Volume
backupExcludePVC bool
shouldInclude bool
}{
{
name: "volume has host path so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-HostPath: &corev1.HostPathVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+HostPath: &corev1api.HostPathVolumeSource{
Path: "some-path",
},
},
@@ -367,12 +367,12 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume has secret mounted so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-Secret: &corev1.SecretVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+Secret: &corev1api.SecretVolumeSource{
SecretName: "sample-secret",
-Items: []corev1.KeyToPath{
+Items: []corev1api.KeyToPath{
{
Key: "username",
Path: "my-username",
@@ -386,11 +386,11 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume has configmap so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-ConfigMap: &corev1.ConfigMapVolumeSource{
-LocalObjectReference: corev1.LocalObjectReference{
+VolumeSource: corev1api.VolumeSource{
+ConfigMap: &corev1api.ConfigMapVolumeSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "sample-cm",
},
},
@@ -401,11 +401,11 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume is mounted as project volume so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-Projected: &corev1.ProjectedVolumeSource{
-Sources: []corev1.VolumeProjection{},
+VolumeSource: corev1api.VolumeSource{
+Projected: &corev1api.ProjectedVolumeSource{
+Sources: []corev1api.VolumeProjection{},
},
},
},
@@ -414,14 +414,14 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume has downwardAPI so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-DownwardAPI: &corev1.DownwardAPIVolumeSource{
-Items: []corev1.DownwardAPIVolumeFile{
+VolumeSource: corev1api.VolumeSource{
+DownwardAPI: &corev1api.DownwardAPIVolumeSource{
+Items: []corev1api.DownwardAPIVolumeFile{
{
Path: "labels",
-FieldRef: &corev1.ObjectFieldSelector{
+FieldRef: &corev1api.ObjectFieldSelector{
FieldPath: "metadata.labels",
},
},
@@ -434,10 +434,10 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume has pvc and backupExcludePVC is true so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "sample-volume",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "sample-pvc",
},
},
@@ -447,10 +447,10 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
},
{
name: "volume name has prefix default-token so do not include",
-vol: corev1.Volume{
+vol: corev1api.Volume{
Name: "default-token-vol-name",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "sample-pvc",
},
},
@@ -495,7 +495,7 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) {
func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
testCases := []struct {
name string
-pod *corev1.Pod
+pod *corev1api.Pod
resources []runtime.Object
resourcePolicies *resourcepolicies.ResourcePolicies
snapshotVolumesFlag *bool
@@ -507,10 +507,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
name: "HostPath volume should be skipped.",
pod: builder.ForPod("ns", "pod-1").
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "",
-VolumeSource: corev1.VolumeSource{
-HostPath: &corev1.HostPathVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+HostPath: &corev1api.HostPathVolumeSource{
Path: "/mnt/test",
},
},
@@ -522,10 +522,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
name: "VolumePolicy match, return true and no error",
pod: builder.ForPod("ns", "pod-1").
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -533,7 +533,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
resources: []runtime.Object{
builder.ForPersistentVolumeClaim("ns", "pvc-1").
VolumeName("pv-1").
-StorageClass("gp2-csi").Phase(corev1.ClaimBound).Result(),
+StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
},
resourcePolicies: &resourcepolicies.ResourcePolicies{
@@ -556,10 +556,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
name: "Volume source is emptyDir, VolumePolicy match, return true and no error",
pod: builder.ForPod("ns", "pod-1").
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "",
-VolumeSource: corev1.VolumeSource{
-EmptyDir: &corev1.EmptyDirVolumeSource{},
+VolumeSource: corev1api.VolumeSource{
+EmptyDir: &corev1api.EmptyDirVolumeSource{},
},
}).Result(),
resourcePolicies: &resourcepolicies.ResourcePolicies{
@@ -582,10 +582,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
name: "VolumePolicy match, action type is not fs-backup, return false and no error",
pod: builder.ForPod("ns", "pod-1").
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -593,7 +593,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
resources: []runtime.Object{
builder.ForPersistentVolumeClaim("ns", "pvc-1").
VolumeName("pv-1").
-StorageClass("gp2-csi").Phase(corev1.ClaimBound).Result(),
+StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
},
resourcePolicies: &resourcepolicies.ResourcePolicies{
@@ -617,10 +617,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
pod: builder.ForPod("ns", "pod-1").
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToBackupAnnotation, "pvc-1")).
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "pvc-1",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -628,7 +628,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
resources: []runtime.Object{
builder.ForPersistentVolumeClaim("ns", "pvc-1").
VolumeName("pv-1").
-StorageClass("gp2-csi").Phase(corev1.ClaimBound).Result(),
+StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
},
resourcePolicies: &resourcepolicies.ResourcePolicies{
@@ -652,10 +652,10 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
pod: builder.ForPod("ns", "pod-1").
ObjectMeta(builder.WithAnnotations(velerov1api.VolumesToExcludeAnnotation, "pvc-1")).
Volumes(
-&corev1.Volume{
+&corev1api.Volume{
Name: "pvc-1",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
@@ -663,7 +663,7 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
resources: []runtime.Object{
builder.ForPersistentVolumeClaim("ns", "pvc-1").
VolumeName("pv-1").
-StorageClass("gp2-csi").Phase(corev1.ClaimBound).Result(),
+StorageClass("gp2-csi").Phase(corev1api.ClaimBound).Result(),
builder.ForPersistentVolume("pv-1").StorageClass("gp2-csi").Result(),
},
defaultVolumesToFSBackup: true,
@@ -711,7 +711,7 @@ func TestGetVolumeFromResource(t *testing.T) {
helper := &volumeHelperImpl{}

t.Run("PersistentVolume input", func(t *testing.T) {
-pv := &corev1.PersistentVolume{
+pv := &corev1api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pv",
},
@@ -724,7 +724,7 @@ func TestGetVolumeFromResource(t *testing.T) {
})

t.Run("Volume input", func(t *testing.T) {
-vol := &corev1.Volume{
+vol := &corev1api.Volume{
Name: "test-volume",
}
outPV, outPod, err := helper.getVolumeFromResource(vol)

@@ -17,7 +17,7 @@ limitations under the License.
package v1

import (
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -164,7 +164,7 @@ type BackupSpec struct {
ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"`
// ResourcePolicy specifies the referenced resource policies that backup should follow
// +optional
-ResourcePolicy *v1.TypedLocalObjectReference `json:"resourcePolicy,omitempty"`
+ResourcePolicy *corev1api.TypedLocalObjectReference `json:"resourcePolicy,omitempty"`

// SnapshotMoveData specifies whether snapshot data should be moved
// +optional
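A sketch of what this field carries when populated — a typed reference to the ConfigMap holding the policy document. The kind string and names here are illustrative assumptions, not taken from this diff:

spec := BackupSpec{
	// resourcepolicies.ConfigmapRefType is used for this kind elsewhere in
	// this change; "my-policy" is a made-up ConfigMap name.
	ResourcePolicy: &corev1api.TypedLocalObjectReference{
		Kind: "configmap",
		Name: "my-policy",
	},
}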
@@ -17,7 +17,7 @@ limitations under the License.
package v1

import (
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -123,7 +123,7 @@ type RestoreSpec struct {
// ResourceModifier specifies the reference to JSON resource patches that should be applied to resources before restoration.
// +optional
// +nullable
-ResourceModifier *v1.TypedLocalObjectReference `json:"resourceModifier,omitempty"`
+ResourceModifier *corev1api.TypedLocalObjectReference `json:"resourceModifier,omitempty"`

// UploaderConfig specifies the configuration for the restore.
// +optional

@@ -21,7 +21,6 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -60,57 +59,57 @@ func TestBackupPVAction(t *testing.T) {
assert.Empty(t, additional)

// Action should clean the spec.Selector when the StorageClassName is not set.
-input := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1.ClaimBound).Result()
+input := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1api.ClaimBound).Result()
inputUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(input)
require.NoError(t, err)
item, additional, err := a.Execute(&unstructured.Unstructured{Object: inputUnstructured}, backup)
require.NoError(t, err)
require.Len(t, additional, 1)
-modifiedPVC := new(corev1.PersistentVolumeClaim)
+modifiedPVC := new(corev1api.PersistentVolumeClaim)
require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), modifiedPVC))
require.Nil(t, modifiedPVC.Spec.Selector)

// Action should clean the spec.Selector when the StorageClassName is set to specific StorageClass
-input2 := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1.ClaimBound).Result()
+input2 := builder.ForPersistentVolumeClaim("abc", "abc").VolumeName("pv").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).Phase(corev1api.ClaimBound).Result()
inputUnstructured2, err2 := runtime.DefaultUnstructuredConverter.ToUnstructured(input2)
require.NoError(t, err2)
item2, additional2, err2 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured2}, backup)
require.NoError(t, err2)
require.Len(t, additional2, 1)
-modifiedPVC2 := new(corev1.PersistentVolumeClaim)
+modifiedPVC2 := new(corev1api.PersistentVolumeClaim)
require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item2.UnstructuredContent(), modifiedPVC2))
require.Nil(t, modifiedPVC2.Spec.Selector)

// Action should keep the spec.Selector when the StorageClassName is set to ""
-input3 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result()
+input3 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}).VolumeName("pv").Phase(corev1api.ClaimBound).Result()
inputUnstructured3, err3 := runtime.DefaultUnstructuredConverter.ToUnstructured(input3)
require.NoError(t, err3)
item3, additional3, err3 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured3}, backup)
require.NoError(t, err3)
require.Len(t, additional3, 1)
-modifiedPVC3 := new(corev1.PersistentVolumeClaim)
+modifiedPVC3 := new(corev1api.PersistentVolumeClaim)
require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item3.UnstructuredContent(), modifiedPVC3))
require.Equal(t, input3.Spec.Selector, modifiedPVC3.Spec.Selector)

// Action should delete label started with "velero.io/" from the spec.Selector when the StorageClassName is set to ""
-input4 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result()
+input4 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1api.ClaimBound).Result()
inputUnstructured4, err4 := runtime.DefaultUnstructuredConverter.ToUnstructured(input4)
require.NoError(t, err4)
item4, additional4, err4 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured4}, backup)
require.NoError(t, err4)
require.Len(t, additional4, 1)
-modifiedPVC4 := new(corev1.PersistentVolumeClaim)
+modifiedPVC4 := new(corev1api.PersistentVolumeClaim)
require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item4.UnstructuredContent(), modifiedPVC4))
require.Equal(t, &metav1.LabelSelector{MatchLabels: map[string]string{"abc": "abc"}}, modifiedPVC4.Spec.Selector)

// Action should clean the spec.Selector when the StorageClassName has value
-input5 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1.ClaimBound).Result()
+input5 := builder.ForPersistentVolumeClaim("abc", "abc").StorageClass("sc1").Selector(&metav1.LabelSelector{MatchLabels: map[string]string{"velero.io/abc": "abc", "abc": "abc"}}).VolumeName("pv").Phase(corev1api.ClaimBound).Result()
inputUnstructured5, err5 := runtime.DefaultUnstructuredConverter.ToUnstructured(input5)
require.NoError(t, err5)
item5, additional5, err5 := a.Execute(&unstructured.Unstructured{Object: inputUnstructured5}, backup)
require.NoError(t, err5)
require.Len(t, additional5, 1)
-modifiedPVC5 := new(corev1.PersistentVolumeClaim)
+modifiedPVC5 := new(corev1api.PersistentVolumeClaim)
require.NoError(t, runtime.DefaultUnstructuredConverter.FromUnstructured(item5.UnstructuredContent(), modifiedPVC5))
require.Nil(t, modifiedPVC5.Spec.Selector)

@@ -28,8 +28,8 @@ import (
v1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
-storagev1 "k8s.io/api/storage/v1"
+corev1api "k8s.io/api/core/v1"
+storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@@ -52,16 +52,16 @@ func TestExecute(t *testing.T) {
tests := []struct {
name string
backup *velerov1api.Backup
-pvc *corev1.PersistentVolumeClaim
-pv *corev1.PersistentVolume
-sc *storagev1.StorageClass
+pvc *corev1api.PersistentVolumeClaim
+pv *corev1api.PersistentVolume
+sc *storagev1api.StorageClass
vsClass *snapshotv1api.VolumeSnapshotClass
operationID string
expectedErr error
expectedBackup *velerov1api.Backup
expectedDataUpload *velerov2alpha1.DataUpload
-expectedPVC *corev1.PersistentVolumeClaim
-resourcePolicy *corev1.ConfigMap
+expectedPVC *corev1api.PersistentVolumeClaim
+resourcePolicy *corev1api.ConfigMap
}{
{
name: "Skip PVC BIA when backup is in finalizing phase",
@@ -71,7 +71,7 @@ func TestExecute(t *testing.T) {
{
name: "Test SnapshotMoveData",
backup: builder.ForBackup("velero", "test").SnapshotMoveData(true).CSISnapshotTimeout(1 * time.Minute).Result(),
-pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
+pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1api.ClaimBound).Result(),
pv: builder.ForPersistentVolume("testPV").CSI("hostpath", "testVolume").Result(),
sc: builder.ForStorageClass("testSC").Provisioner("hostpath").Result(),
vsClass: builder.ForVolumeSnapshotClass("testVSClass").Driver("hostpath").ObjectMeta(builder.WithLabels(velerov1api.VolumeSnapshotClassSelectorLabel, "")).Result(),
@@ -117,7 +117,7 @@ func TestExecute(t *testing.T) {
{
name: "Verify PVC is modified as expected",
backup: builder.ForBackup("velero", "test").SnapshotMoveData(true).CSISnapshotTimeout(1 * time.Minute).Result(),
-pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
+pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1api.ClaimBound).Result(),
pv: builder.ForPersistentVolume("testPV").CSI("hostpath", "testVolume").Result(),
sc: builder.ForStorageClass("testSC").Provisioner("hostpath").Result(),
vsClass: builder.ForVolumeSnapshotClass("tescVSClass").Driver("hostpath").ObjectMeta(builder.WithLabels(velerov1api.VolumeSnapshotClassSelectorLabel, "")).Result(),
@@ -126,13 +126,13 @@ func TestExecute(t *testing.T) {
expectedPVC: builder.ForPersistentVolumeClaim("velero", "testPVC").
ObjectMeta(builder.WithAnnotations(velerov1api.MustIncludeAdditionalItemAnnotation, "true", velerov1api.DataUploadNameAnnotation, "velero/"),
builder.WithLabels(velerov1api.BackupNameLabel, "test")).
-VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
+VolumeName("testPV").StorageClass("testSC").Phase(corev1api.ClaimBound).Result(),
},
{
name: "Test ResourcePolicy",
backup: builder.ForBackup("velero", "test").ResourcePolicies("resourcePolicy").SnapshotVolumes(false).Result(),
resourcePolicy: builder.ForConfigMap("velero", "resourcePolicy").Data("policy", "{\"version\":\"v1\", \"volumePolicies\":[{\"conditions\":{\"csi\": {}},\"action\":{\"type\":\"snapshot\"}}]}").Result(),
-pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
+pvc: builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1api.ClaimBound).Result(),
pv: builder.ForPersistentVolume("testPV").CSI("hostpath", "testVolume").Result(),
sc: builder.ForStorageClass("testSC").Provisioner("hostpath").Result(),
vsClass: builder.ForVolumeSnapshotClass("tescVSClass").Driver("hostpath").ObjectMeta(builder.WithLabels(velerov1api.VolumeSnapshotClassSelectorLabel, "")).Result(),
@@ -217,10 +217,10 @@ func TestExecute(t *testing.T) {
}

if tc.expectedPVC != nil {
-resultPVC := new(corev1.PersistentVolumeClaim)
+resultPVC := new(corev1api.PersistentVolumeClaim)
runtime.DefaultUnstructuredConverter.FromUnstructured(resultUnstructed.UnstructuredContent(), resultPVC)

-require.True(t, cmp.Equal(tc.expectedPVC, resultPVC, cmpopts.IgnoreFields(corev1.PersistentVolumeClaim{}, "ResourceVersion", "Annotations", "Labels")))
+require.True(t, cmp.Equal(tc.expectedPVC, resultPVC, cmpopts.IgnoreFields(corev1api.PersistentVolumeClaim{}, "ResourceVersion", "Annotations", "Labels")))
}
})
}

@@ -23,7 +23,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-rbac "k8s.io/api/rbac/v1"
+rbacv1 "k8s.io/api/rbac/v1"
rbacbeta "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -34,7 +34,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/actionhelpers"
)

-func newV1ClusterRoleBindingList(rbacCRBList []rbac.ClusterRoleBinding) []actionhelpers.ClusterRoleBinding {
+func newV1ClusterRoleBindingList(rbacCRBList []rbacv1.ClusterRoleBinding) []actionhelpers.ClusterRoleBinding {
var crbs []actionhelpers.ClusterRoleBinding
for _, c := range rbacCRBList {
crbs = append(crbs, actionhelpers.V1ClusterRoleBinding{Crb: c})
@@ -53,7 +53,7 @@ func newV1beta1ClusterRoleBindingList(rbacCRBList []rbacbeta.ClusterRoleBinding)
}

type FakeV1ClusterRoleBindingLister struct {
-v1crbs []rbac.ClusterRoleBinding
+v1crbs []rbacv1.ClusterRoleBinding
}

func (f FakeV1ClusterRoleBindingLister) List() ([]actionhelpers.ClusterRoleBinding, error) {
@@ -98,17 +98,17 @@ func TestNewServiceAccountAction(t *testing.T) {
}{
{
name: "rbac v1 API instantiates an saAction",
-version: rbac.SchemeGroupVersion.Version,
+version: rbacv1.SchemeGroupVersion.Version,
expectedCRBs: []actionhelpers.ClusterRoleBinding{
actionhelpers.V1ClusterRoleBinding{
-Crb: rbac.ClusterRoleBinding{
+Crb: rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",
},
},
},
actionhelpers.V1ClusterRoleBinding{
-Crb: rbac.ClusterRoleBinding{
+Crb: rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-2",
},
@@ -146,7 +146,7 @@ func TestNewServiceAccountAction(t *testing.T) {
discoveryHelper := velerotest.FakeDiscoveryHelper{}
logger := velerotest.NewLogger()

-v1crbs := []rbac.ClusterRoleBinding{
+v1crbs := []rbacv1.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",
@@ -173,7 +173,7 @@ func TestNewServiceAccountAction(t *testing.T) {
}

clusterRoleBindingListers := map[string]actionhelpers.ClusterRoleBindingLister{
-rbac.SchemeGroupVersion.Version: FakeV1ClusterRoleBindingLister{v1crbs: v1crbs},
+rbacv1.SchemeGroupVersion.Version: FakeV1ClusterRoleBindingLister{v1crbs: v1crbs},
rbacbeta.SchemeGroupVersion.Version: FakeV1beta1ClusterRoleBindingLister{v1beta1crbs: v1beta1crbs},
"": actionhelpers.NoopClusterRoleBindingLister{},
}
@@ -183,7 +183,7 @@
// We only care about the preferred version, nothing else in the list
discoveryHelper.APIGroupsList = []metav1.APIGroup{
{
-Name: rbac.GroupName,
+Name: rbacv1.GroupName,
PreferredVersion: metav1.GroupVersionForDiscovery{
Version: test.version,
},
@@ -200,7 +200,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
tests := []struct {
name string
serviceAccount runtime.Unstructured
-crbs []rbac.ClusterRoleBinding
+crbs []rbacv1.ClusterRoleBinding
expectedAdditionalItems []velero.ResourceIdentifier
}{
{
@@ -230,9 +230,9 @@ func TestServiceAccountActionExecute(t *testing.T) {
}
}
`),
-crbs: []rbac.ClusterRoleBinding{
+crbs: []rbacv1.ClusterRoleBinding{
{
-Subjects: []rbac.Subject{
+Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
@@ -244,17 +244,17 @@ func TestServiceAccountActionExecute(t *testing.T) {
Name: "velero",
},
{
-Kind: rbac.ServiceAccountKind,
+Kind: rbacv1.ServiceAccountKind,
Namespace: "non-matching-ns",
Name: "velero",
},
{
-Kind: rbac.ServiceAccountKind,
+Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "non-matching-name",
},
},
-RoleRef: rbac.RoleRef{
+RoleRef: rbacv1.RoleRef{
Name: "role",
},
},
@@ -273,19 +273,19 @@ func TestServiceAccountActionExecute(t *testing.T) {
}
}
`),
-crbs: []rbac.ClusterRoleBinding{
+crbs: []rbacv1.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-1",
},
-Subjects: []rbac.Subject{
+Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
-RoleRef: rbac.RoleRef{
+RoleRef: rbacv1.RoleRef{
Name: "role-1",
},
},
@@ -293,19 +293,19 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-2",
},
-Subjects: []rbac.Subject{
+Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
-Kind: rbac.ServiceAccountKind,
+Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},
},
-RoleRef: rbac.RoleRef{
+RoleRef: rbacv1.RoleRef{
Name: "role-2",
},
},
@@ -313,14 +313,14 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-3",
},
-Subjects: []rbac.Subject{
+Subjects: []rbacv1.Subject{
{
-Kind: rbac.ServiceAccountKind,
+Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},
},
-RoleRef: rbac.RoleRef{
+RoleRef: rbacv1.RoleRef{
Name: "role-3",
},
},
@@ -328,9 +328,9 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-4",
},
-Subjects: []rbac.Subject{
+Subjects: []rbacv1.Subject{
{
-Kind: rbac.ServiceAccountKind,
+Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},
@@ -340,7 +340,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
Name: "non-matching-name",
},
},
-RoleRef: rbac.RoleRef{
+RoleRef: rbacv1.RoleRef{
Name: "role-4",
},
},

@@ -35,7 +35,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -1399,12 +1399,12 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
builder.ForPersistentVolume("pv-1").StorageClass("gp2").Result(),
),
test.PVCs(
-builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").StorageClass("gp2").Phase(corev1.ClaimBound).Result(),
+builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").StorageClass("gp2").Phase(corev1api.ClaimBound).Result(),
),
},
runtimeResources: []runtime.Object{
builder.ForPersistentVolume("pv-1").StorageClass("gp2").Result(),
-builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").StorageClass("gp2").Phase(corev1.ClaimBound).Result(),
+builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").StorageClass("gp2").Phase(corev1api.ClaimBound).Result(),
},
actions: []*recordResourcesAction{
new(recordResourcesAction).WithName(csiBIAPluginName).ForNamespace("ns-1").ForResource("persistentvolumeclaims").WithSkippedCSISnapshotFlag(true),
@@ -1433,11 +1433,11 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) {
},
apiResources: []*test.APIResource{
test.PVCs(
-builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(),
+builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1api.ClaimBound).Result(),
),
},
runtimeResources: []runtime.Object{
-builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(),
+builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1api.ClaimBound).Result(),
builder.ForPersistentVolume("pv-1").StorageClass("gp2").Result(),
},
actions: []*recordResourcesAction{
@@ -4014,7 +4014,7 @@ type fakePodVolumeBackupper struct {

// BackupPodVolumes returns one pod volume backup per entry in volumes, with namespace "velero"
// and name "pvb-<pod-namespace>-<pod-name>-<volume-name>".
-func (b *fakePodVolumeBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, volumes []string, _ *resourcepolicies.Policies, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, *podvolume.PVCBackupSummary, []error) {
+func (b *fakePodVolumeBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1api.Pod, volumes []string, _ *resourcepolicies.Policies, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, *podvolume.PVCBackupSummary, []error) {
var res []*velerov1.PodVolumeBackup
pvcSummary := podvolume.NewPVCBackupSummary()

@@ -4064,7 +4064,7 @@ func TestBackupWithPodVolume(t *testing.T) {
name string
backup *velerov1.Backup
apiResources []*test.APIResource
-pod *corev1.Pod
+pod *corev1api.Pod
vsl *velerov1.VolumeSnapshotLocation
snapshotterGetter volumeSnapshotterGetter
want []*velerov1.PodVolumeBackup
@@ -4076,10 +4076,10 @@ func TestBackupWithPodVolume(t *testing.T) {
test.Pods(
builder.ForPod("ns-1", "pod-1").
ObjectMeta(builder.WithAnnotations("backup.velero.io/backup-volumes", "foo")).
-Volumes(&corev1.Volume{
+Volumes(&corev1api.Volume{
Name: "foo",
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "foo",
},
},

@@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -155,7 +155,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
tests := []struct {
name string
ie *collections.IncludesExcludes
-namespaces []*corev1.Namespace
+namespaces []*corev1api.Namespace
backup *velerov1api.Backup
expectedTrackedNS []string
}{
@@ -163,7 +163,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
name: "ns filter by namespace IE filter",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewIncludesExcludes().Includes("ns1"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").Result(),
builder.ForNamespace("ns2").Result(),
},
@@ -175,7 +175,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
@@ -187,7 +187,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
@@ -199,7 +199,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewIncludesExcludes().Excludes("ns1"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
@@ -211,7 +211,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewIncludesExcludes().Excludes("ns1", "ns2"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
builder.ForNamespace("ns3").Result(),
@@ -222,7 +222,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
name: "No ns filters",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
@@ -232,7 +232,7 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
name: "ns specified by the IncludeNamespaces cannot be found",
backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
ie: collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
-namespaces: []*corev1.Namespace{
+namespaces: []*corev1api.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
builder.ForNamespace("ns3").Result(),

@@ -20,7 +20,7 @@ import (
"fmt"
"time"

-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/vmware-tanzu/velero/internal/resourcepolicies"
@@ -284,7 +284,7 @@ func (b *BackupBuilder) ItemOperationTimeout(timeout time.Duration) *BackupBuild

// ResourcePolicies sets the Backup's resource policies.
func (b *BackupBuilder) ResourcePolicies(name string) *BackupBuilder {
-b.object.Spec.ResourcePolicy = &v1.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: name}
+b.object.Spec.ResourcePolicy = &corev1api.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: name}
return b
}

@@ -17,21 +17,21 @@ limitations under the License.
package builder

import (
-rbacv1api "k8s.io/api/rbac/v1"
+rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// RoleBuilder builds Role objects.
type RoleBuilder struct {
-object *rbacv1api.Role
+object *rbacv1.Role
}

// ForRole is the constructor for a RoleBuilder.
func ForRole(ns, name string) *RoleBuilder {
return &RoleBuilder{
-object: &rbacv1api.Role{
+object: &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
-APIVersion: rbacv1api.SchemeGroupVersion.String(),
+APIVersion: rbacv1.SchemeGroupVersion.String(),
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
@@ -43,7 +43,7 @@ func ForRole(ns, name string) *RoleBuilder {
}

// Result returns the built Role.
-func (b *RoleBuilder) Result() *rbacv1api.Role {
+func (b *RoleBuilder) Result() *rbacv1.Role {
return b.object
}
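A one-line usage sketch of the builder above (namespace and name invented for illustration):

role := ForRole("velero", "example-role").Result() // *rbacv1.Role with TypeMeta and ObjectMeta pre-filled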
@@ -17,37 +17,37 @@ limitations under the License.
package builder

import (
-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// StatefulSetBuilder builds StatefulSet objects.
type StatefulSetBuilder struct {
-object *appsv1.StatefulSet
+object *appsv1api.StatefulSet
}

// ForStatefulSet is the constructor for a StatefulSetBuilder.
func ForStatefulSet(ns, name string) *StatefulSetBuilder {
return &StatefulSetBuilder{
-object: &appsv1.StatefulSet{
+object: &appsv1api.StatefulSet{
TypeMeta: metav1.TypeMeta{
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
Kind: "StatefulSet",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
-Spec: appsv1.StatefulSetSpec{
-VolumeClaimTemplates: []corev1.PersistentVolumeClaim{},
+Spec: appsv1api.StatefulSetSpec{
+VolumeClaimTemplates: []corev1api.PersistentVolumeClaim{},
},
},
}
}

// Result returns the built StatefulSet.
-func (b *StatefulSetBuilder) Result() *appsv1.StatefulSet {
+func (b *StatefulSetBuilder) Result() *appsv1api.StatefulSet {
return b.object
}

@@ -56,7 +56,7 @@ func (b *StatefulSetBuilder) StorageClass(names ...string) *StatefulSetBuilder {
for _, name := range names {
nameTmp := name
b.object.Spec.VolumeClaimTemplates = append(b.object.Spec.VolumeClaimTemplates,
-corev1.PersistentVolumeClaim{Spec: corev1.PersistentVolumeClaimSpec{StorageClassName: &nameTmp}})
+corev1api.PersistentVolumeClaim{Spec: corev1api.PersistentVolumeClaimSpec{StorageClassName: &nameTmp}})
}
return b
}
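A usage sketch of the StorageClass helper above, with invented names:

// Each storage class becomes one VolumeClaimTemplate on the StatefulSet.
sts := ForStatefulSet("velero", "example-sts").StorageClass("gp2-csi", "gp3-csi").Result()
// len(sts.Spec.VolumeClaimTemplates) == 2

The nameTmp copy in the loop is deliberate: the template stores &nameTmp, and taking the address of the range variable itself would, under pre-Go 1.22 loop-variable semantics, leave every template pointing at the last name.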
@@ -18,7 +18,7 @@ package builder

import (
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
@@ -62,7 +62,7 @@ func (v *VolumeSnapshotContentBuilder) DeletionPolicy(policy snapshotv1api.Delet

// VolumeSnapshotRef sets the built VolumeSnapshotContent's spec.VolumeSnapshotRef value.
func (v *VolumeSnapshotContentBuilder) VolumeSnapshotRef(namespace, name, uid string) *VolumeSnapshotContentBuilder {
-v.object.Spec.VolumeSnapshotRef = v1.ObjectReference{
+v.object.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
APIVersion: "snapshot.storage.k8s.io/v1",
Kind: "VolumeSnapshot",
Namespace: namespace,

@@ -26,7 +26,7 @@ import (
flag "github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
@@ -80,8 +80,8 @@ func TestBuildBackupStorageLocationSetsCredential(t *testing.T) {

bsl, err = o.BuildBackupStorageLocation("velero-test-ns", false, true)
assert.NoError(t, err)
-assert.Equal(t, &v1.SecretKeySelector{
-LocalObjectReference: v1.LocalObjectReference{Name: "my-secret"},
+assert.Equal(t, &corev1api.SecretKeySelector{
+LocalObjectReference: corev1api.LocalObjectReference{Name: "my-secret"},
Key: "key-from-secret",
}, bsl.Spec.Credential)
}

@@ -24,7 +24,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
@@ -142,7 +142,7 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
return nil, errors.Wrap(err, "error to add velero v2alpha1 scheme")
}

-if err := v1.AddToScheme(scheme); err != nil {
+if err := corev1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, errors.Wrap(err, "error to add core v1 scheme")
}
@@ -153,7 +153,7 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
cacheOption := ctlcache.Options{
Scheme: scheme,
ByObject: map[ctlclient.Object]ctlcache.ByObject{
-&v1.Pod{}: {
+&corev1api.Pod{}: {
Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
},
&velerov2alpha1api.DataUpload{}: {
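For readers unfamiliar with the pattern in the last two hunks, a condensed sketch of what they configure — register the core v1 types in a runtime scheme, then scope the controller-runtime cache so Pod objects are cached only for the local node. Identifiers not shown in the hunks (such as nodeName) are assumed:

scheme := runtime.NewScheme()
if err := corev1api.AddToScheme(scheme); err != nil {
	// handle the error
}

// Cache only the Pods scheduled on this node; other Pods never enter memory.
cacheOption := ctlcache.Options{
	Scheme: scheme,
	ByObject: map[ctlclient.Object]ctlcache.ByObject{
		&corev1api.Pod{}: {Field: fields.Set{"spec.nodeName": nodeName}.AsSelector()},
	},
}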
@@ -24,7 +24,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
@@ -134,7 +134,7 @@ func newdataMoverRestore(logger logrus.FieldLogger, factory client.Factory, conf
return nil, errors.Wrap(err, "error to add velero v2alpha1 scheme")
}

-if err := v1.AddToScheme(scheme); err != nil {
+if err := corev1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, errors.Wrap(err, "error to add core v1 scheme")
}
@@ -145,7 +145,7 @@ func newdataMoverRestore(logger logrus.FieldLogger, factory client.Factory, conf
cacheOption := ctlcache.Options{
Scheme: scheme,
ByObject: map[ctlclient.Object]ctlcache.ByObject{
-&v1.Pod{}: {
+&corev1api.Pod{}: {
Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
},
&velerov2alpha1api.DataDownload{}: {

@@ -30,7 +30,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/vmware-tanzu/crash-diagnostics/exec"
-appsv1 "k8s.io/api/apps/v1"
+appsv1api "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/clientcmd"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -117,7 +117,7 @@ func (o *option) validate(f client.Factory) error {
if err != nil {
return err
}
-deploymentList := new(appsv1.DeploymentList)
+deploymentList := new(appsv1api.DeploymentList)
selector, err := labels.Parse("component=velero")
cmd.CheckError(err)
err = crClient.List(context.TODO(), deploymentList, &ctrlclient.ListOptions{

@@ -30,7 +30,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
storagev1api "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -170,7 +170,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
cancelFunc()
return nil, err
}
-if err := v1.AddToScheme(scheme); err != nil {
+if err := corev1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
@@ -184,7 +184,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
// use a field selector to filter to only pods scheduled on this node.
cacheOption := cache.Options{
ByObject: map[ctrlclient.Object]cache.ByObject{
-&v1.Pod{}: {
+&corev1api.Pod{}: {
Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
},
&velerov1api.PodVolumeBackup{}: {
@@ -199,7 +199,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
&velerov2alpha1api.DataDownload{}: {
Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
},
-&v1.Event{}: {
+&corev1api.Event{}: {
Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
},
},
@@ -328,7 +328,7 @@ func (s *nodeAgentServer) run() {
s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
}

-podResources := v1.ResourceRequirements{}
+podResources := corev1api.ResourceRequirements{}
if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
s.logger.WithError(err).Warn("Pod resource requirements are invalid, ignore")
@@ -391,7 +391,7 @@ func (s *nodeAgentServer) run() {
}

func (s *nodeAgentServer) waitCacheForResume() error {
-podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &v1.Pod{})
+podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &corev1api.Pod{})
if err != nil {
return errors.Wrap(err, "error getting pod informer")
}
@@ -444,7 +444,7 @@ func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface

// if the pod is a mirror pod, the directory name is the hash value of the
// mirror pod annotation
-if hash, ok := pod.GetAnnotations()[v1.MirrorPodAnnotationKey]; ok {
+if hash, ok := pod.GetAnnotations()[corev1api.MirrorPodAnnotationKey]; ok {
dirName = hash
}

@@ -517,7 +517,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
continue
}

-pod := &v1.Pod{}
+pod := &corev1api.Pod{}
if err := client.Get(s.ctx, types.NamespacedName{
Namespace: pvr.Spec.Pod.Namespace,
Name: pvr.Spec.Pod.Name,

@ -24,7 +24,7 @@ import (
 "testing"

 "github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/client-go/kubernetes"

@ -38,14 +38,14 @@ import (
 func Test_validatePodVolumesHostPath(t *testing.T) {
 tests := []struct {
 name string
-pods []*corev1.Pod
+pods []*corev1api.Pod
 dirs []string
 createDir bool
 wantErr bool
 }{
 {
 name: "no error when pod volumes are present",
-pods: []*corev1.Pod{
+pods: []*corev1api.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 },

@ -55,9 +55,9 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 },
 {
 name: "no error when pod volumes are present and there are mirror pods",
-pods: []*corev1.Pod{
+pods: []*corev1api.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
-builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1.MirrorPodAnnotationKey, "baz")).Result(),
+builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1api.MirrorPodAnnotationKey, "baz")).Result(),
 },
 dirs: []string{"foo", "baz"},
 createDir: true,

@ -65,7 +65,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 },
 {
 name: "error when all pod volumes missing",
-pods: []*corev1.Pod{
+pods: []*corev1api.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 },

@ -75,7 +75,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 },
 {
 name: "error when some pod volumes missing",
-pods: []*corev1.Pod{
+pods: []*corev1api.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 },

@ -85,7 +85,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 },
 {
 name: "no error when pod volumes are not present",
-pods: []*corev1.Pod{
+pods: []*corev1api.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 },
 dirs: []string{"foo"},
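The mirror-pod cases above exercise the directory-name selection from the previous file; condensed, the rule is roughly the following sketch (an assumed shape, not the exact Velero helper):

    package sketch

    import (
        corev1api "k8s.io/api/core/v1"
    )

    // hostPathDirName picks the per-pod volume directory: regular pods are keyed
    // by UID, while mirror pods are keyed by the hash stored in the mirror-pod
    // annotation (corev1api.MirrorPodAnnotationKey).
    func hostPathDirName(pod *corev1api.Pod) string {
        if hash, ok := pod.GetAnnotations()[corev1api.MirrorPodAnnotationKey]; ok {
            return hash
        }
        return string(pod.GetUID())
    }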
@ -12,7 +12,7 @@ import (
 "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/client-go/kubernetes"
 ctrl "sigs.k8s.io/controller-runtime"

@ -90,7 +90,7 @@ func (o *Options) initClient(f velerocli.Factory) (client.Client, error) {
 return nil, errors.Wrap(err, "failed to add velero scheme")
 }

-err = v1.AddToScheme(scheme)
+err = corev1api.AddToScheme(scheme)
 if err != nil {
 return nil, errors.Wrap(err, "failed to add api core scheme")
 }
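The AddToScheme call renamed here is the standard client-go registration pattern; a minimal, self-contained sketch:

    package main

    import (
        "fmt"

        corev1api "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        // Register the core/v1 types (Pod, Secret, Namespace, ...) so a client
        // built on this scheme can serialize and watch them.
        scheme := runtime.NewScheme()
        if err := corev1api.AddToScheme(scheme); err != nil {
            panic(err)
        }
        fmt.Println(scheme.Recognizes(corev1api.SchemeGroupVersion.WithKind("Pod"))) // true
    }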
@ -25,7 +25,7 @@ import (
 "github.com/pkg/errors"
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/client-go/tools/cache"

@ -299,12 +299,12 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 }
 }

-var resModifiers *corev1.TypedLocalObjectReference
+var resModifiers *corev1api.TypedLocalObjectReference

 if o.ResourceModifierConfigMap != "" {
-resModifiers = &corev1.TypedLocalObjectReference{
+resModifiers = &corev1api.TypedLocalObjectReference{
 // Group for core API is ""
-APIGroup: &corev1.SchemeGroupVersion.Group,
+APIGroup: &corev1api.SchemeGroupVersion.Group,
 Kind: resourcemodifiers.ConfigmapRefType,
 Name: o.ResourceModifierConfigMap,
 }
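One detail in the hunk above is easy to miss: the core API group is the empty string, and TypedLocalObjectReference takes a *string, which is why the code points at corev1api.SchemeGroupVersion.Group rather than passing a literal. A standalone sketch (the ConfigMap name is hypothetical):

    package main

    import (
        "fmt"

        corev1api "k8s.io/api/core/v1"
    )

    func main() {
        // Group for the core API is ""; APIGroup wants *string, so take the
        // address of the package-level group value.
        ref := corev1api.TypedLocalObjectReference{
            APIGroup: &corev1api.SchemeGroupVersion.Group,
            Kind:     "ConfigMap",
            Name:     "my-resource-modifiers", // hypothetical
        }
        fmt.Printf("%s/%s (group %q)\n", ref.Kind, ref.Name, *ref.APIGroup)
    }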
@ -23,7 +23,7 @@ import (
 "github.com/pkg/errors"
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 "github.com/vmware-tanzu/velero/internal/resourcepolicies"

@ -168,7 +168,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 }

 if o.BackupOptions.ResPoliciesConfigmap != "" {
-schedule.Spec.Template.ResourcePolicy = &v1.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: o.BackupOptions.ResPoliciesConfigmap}
+schedule.Spec.Template.ResourcePolicy = &corev1api.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: o.BackupOptions.ResPoliciesConfigmap}
 }

 if o.BackupOptions.ParallelFilesUpload > 0 {
@ -28,7 +28,7 @@ import (
 "github.com/spf13/pflag"

 appsv1api "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -173,7 +173,7 @@ func Run(ctx context.Context, kbClient kbclient.Client, namespace string) error

 func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace string) error {
 // First check if it's already been deleted
-ns := &corev1.Namespace{}
+ns := &corev1api.Namespace{}
 key := kbclient.ObjectKey{Name: namespace}
 if err := kbClient.Get(ctx, key, ns); err != nil {
 if apierrors.IsNotFound(err) {
@ -32,7 +32,7 @@ import (
 "github.com/prometheus/client_golang/prometheus/promhttp"
 "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
-appsv1 "k8s.io/api/apps/v1"
+appsv1api "k8s.io/api/apps/v1"
 batchv1api "k8s.io/api/batch/v1"
 corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"

@ -234,7 +234,7 @@ func newServer(f client.Factory, config *config.Config, logger *logrus.Logger) (
 cancelFunc()
 return nil, err
 }
-if err := appsv1.AddToScheme(scheme); err != nil {
+if err := appsv1api.AddToScheme(scheme); err != nil {
 cancelFunc()
 return nil, err
 }
@ -25,7 +25,7 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 kubefake "k8s.io/client-go/kubernetes/fake"

@ -247,7 +247,7 @@ func Test_newServer(t *testing.T) {
 }

 func Test_namespaceExists(t *testing.T) {
-client := kubefake.NewSimpleClientset(&corev1.Namespace{
+client := kubefake.NewSimpleClientset(&corev1api.Namespace{
 ObjectMeta: metav1.ObjectMeta{
 Name: "velero",
 },
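The seeded fake clientset above is the usual way to unit-test an existence check; the pattern in isolation looks like this sketch:

    package main

    import (
        "context"
        "fmt"

        corev1api "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kubefake "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        // The fake clientset is pre-populated with a Namespace object, so Get
        // succeeds without touching a real cluster.
        client := kubefake.NewSimpleClientset(&corev1api.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "velero"},
        })
        _, err := client.CoreV1().Namespaces().Get(context.TODO(), "velero", metav1.GetOptions{})
        fmt.Println("namespace exists:", err == nil)
    }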
@ -25,7 +25,7 @@ import (
 "strconv"
 "strings"

-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"

@ -119,7 +119,7 @@ func DescribeBackup(
 }

 // DescribeResourcePolicies describes resource policies in human-readable format
-func DescribeResourcePolicies(d *Describer, resPolicies *v1.TypedLocalObjectReference) {
+func DescribeResourcePolicies(d *Describer, resPolicies *corev1api.TypedLocalObjectReference) {
 d.Printf("Resource policies:\n")
 d.Printf("\tType:\t%s\n", resPolicies.Kind)
 d.Printf("\tName:\t%s\n", resPolicies.Name)
@ -24,7 +24,7 @@ import (

 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"

 "github.com/vmware-tanzu/velero/internal/volume"
 velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@ -50,7 +50,7 @@ func TestDescribeUploaderConfig(t *testing.T) {
 }

 func TestDescribeResourcePolicies(t *testing.T) {
-input := &v1.TypedLocalObjectReference{
+input := &corev1api.TypedLocalObjectReference{
 Kind: "configmap",
 Name: "test-resource-policy",
 }
@ -23,7 +23,7 @@ import (
 "fmt"
 "strings"

-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 kbclient "sigs.k8s.io/controller-runtime/pkg/client"

@ -571,7 +571,7 @@ func DescribeBackupResultsInSF(ctx context.Context, kbClient kbclient.Client, d
 }

 // DescribeResourcePoliciesInSF describes resource policies in structured format.
-func DescribeResourcePoliciesInSF(d *StructuredDescriber, resPolicies *v1.TypedLocalObjectReference) {
+func DescribeResourcePoliciesInSF(d *StructuredDescriber, resPolicies *corev1api.TypedLocalObjectReference) {
 policiesInfo := make(map[string]any)
 policiesInfo["type"] = resPolicies.Kind
 policiesInfo["name"] = resPolicies.Name
@ -23,7 +23,7 @@ import (

 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"

 "github.com/vmware-tanzu/velero/internal/volume"
 velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@ -523,7 +523,7 @@ func TestDescribeCSISnapshotsInSF(t *testing.T) {
 }

 func TestDescribeResourcePoliciesInSF(t *testing.T) {
-input := &v1.TypedLocalObjectReference{
+input := &corev1api.TypedLocalObjectReference{
 Kind: "configmap",
 Name: "resource-policy-1",
 }
@ -27,7 +27,7 @@ import (

 "github.com/vmware-tanzu/velero/internal/volume"

-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 kbclient "sigs.k8s.io/controller-runtime/pkg/client"

@ -488,7 +488,7 @@ func describeRestoreResourceList(ctx context.Context, kbClient kbclient.Client,
 }

 // DescribeResourceModifier describes resource policies in human-readable format
-func DescribeResourceModifier(d *Describer, resModifier *v1.TypedLocalObjectReference) {
+func DescribeResourceModifier(d *Describer, resModifier *corev1api.TypedLocalObjectReference) {
 d.Printf("Resource modifier:\n")
 d.Printf("\tType:\t%s\n", resModifier.Kind)
 d.Printf("\tName:\t%s\n", resModifier.Name)
@ -9,7 +9,7 @@ import (

 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"

 "github.com/vmware-tanzu/velero/internal/volume"
 velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@ -400,8 +400,8 @@ func TestDescribeResourceModifier(t *testing.T) {

 d.out.Init(d.buf, 0, 8, 2, ' ', 0)

-DescribeResourceModifier(d, &v1.TypedLocalObjectReference{
-APIGroup: &v1.SchemeGroupVersion.Group,
+DescribeResourceModifier(d, &corev1api.TypedLocalObjectReference{
+APIGroup: &corev1api.SchemeGroupVersion.Group,
 Kind: "ConfigMap",
 Name: "resourceModifier",
 })
@ -27,7 +27,7 @@ import (
 jsonpatch "github.com/evanphx/json-patch/v5"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/types"

@ -540,7 +540,7 @@ func (r *backupDeletionReconciler) deleteMovedSnapshots(ctx context.Context, bac
 if r.repoMgr == nil {
 return nil
 }
-list := &corev1.ConfigMapList{}
+list := &corev1api.ConfigMapList{}
 if err := r.Client.List(ctx, list, &client.ListOptions{
 Namespace: backup.Namespace,
 LabelSelector: labels.SelectorFromSet(
@ -22,7 +22,7 @@ import (
 "github.com/sirupsen/logrus"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/types"

@ -671,14 +671,14 @@ func TestNeedInvalidBackupRepo(t *testing.T) {
 }

 func TestGetBackupRepositoryConfig(t *testing.T) {
-configWithNoData := &corev1.ConfigMap{
+configWithNoData := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "config-1",
 Namespace: velerov1api.DefaultNamespace,
 },
 }

-configWithWrongData := &corev1.ConfigMap{
+configWithWrongData := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "config-1",
 Namespace: velerov1api.DefaultNamespace,

@ -688,7 +688,7 @@ func TestGetBackupRepositoryConfig(t *testing.T) {
 },
 }

-configWithData := &corev1.ConfigMap{
+configWithData := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "config-1",
 Namespace: velerov1api.DefaultNamespace,

@ -752,7 +752,7 @@ func TestGetBackupRepositoryConfig(t *testing.T) {
 }

 scheme := runtime.NewScheme()
-corev1.AddToScheme(scheme)
+corev1api.AddToScheme(scheme)

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {

@ -958,7 +958,7 @@ func TestRecallMaintenance(t *testing.T) {

 scheme := runtime.NewScheme()
 batchv1.AddToScheme(scheme)
-corev1.AddToScheme(scheme)
+corev1api.AddToScheme(scheme)
 velerov1api.AddToScheme(scheme)

 jobSucceeded := &batchv1.Job{

@ -975,9 +975,9 @@ func TestRecallMaintenance(t *testing.T) {
 },
 }

-jobPodSucceeded := builder.ForPod(velerov1api.DefaultNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&corev1.ContainerStatus{
-State: corev1.ContainerState{
-Terminated: &corev1.ContainerStateTerminated{},
+jobPodSucceeded := builder.ForPod(velerov1api.DefaultNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&corev1api.ContainerStatus{
+State: corev1api.ContainerState{
+Terminated: &corev1api.ContainerStateTerminated{},
 },
 }).Result()
@ -24,7 +24,7 @@ import (

 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"

@ -65,13 +65,13 @@ type DataDownloadReconciler struct {
 nodeName string
 dataPathMgr *datapath.Manager
 restorePVCConfig nodeagent.RestorePVC
-podResources v1.ResourceRequirements
+podResources corev1api.ResourceRequirements
 preparingTimeout time.Duration
 metrics *metrics.ServerMetrics
 }

 func NewDataDownloadReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager,
-restorePVCConfig nodeagent.RestorePVC, podResources v1.ResourceRequirements, nodeName string, preparingTimeout time.Duration,
+restorePVCConfig nodeagent.RestorePVC, podResources corev1api.ResourceRequirements, nodeName string, preparingTimeout time.Duration,
 logger logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataDownloadReconciler {
 return &DataDownloadReconciler{
 client: client,

@ -518,10 +518,10 @@ func (r *DataDownloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 return ctrl.NewControllerManagedBy(mgr).
 For(&velerov2alpha1api.DataDownload{}).
 WatchesRawSource(s).
-Watches(&v1.Pod{}, kube.EnqueueRequestsFromMapUpdateFunc(r.findSnapshotRestoreForPod),
+Watches(&corev1api.Pod{}, kube.EnqueueRequestsFromMapUpdateFunc(r.findSnapshotRestoreForPod),
 builder.WithPredicates(predicate.Funcs{
 UpdateFunc: func(ue event.UpdateEvent) bool {
-newObj := ue.ObjectNew.(*v1.Pod)
+newObj := ue.ObjectNew.(*corev1api.Pod)

 if _, ok := newObj.Labels[velerov1api.DataDownloadLabel]; !ok {
 return false

@ -547,7 +547,7 @@ func (r *DataDownloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 }

 func (r *DataDownloadReconciler) findSnapshotRestoreForPod(ctx context.Context, podObj client.Object) []reconcile.Request {
-pod := podObj.(*v1.Pod)
+pod := podObj.(*corev1api.Pod)
 dd, err := findDataDownloadByPod(r.client, *pod)

 log := r.logger.WithField("pod", pod.Name)

@ -566,7 +566,7 @@ func (r *DataDownloadReconciler) findSnapshotRestoreForPod(ctx context.Context,
 return []reconcile.Request{}
 }

-if pod.Status.Phase == v1.PodRunning {
+if pod.Status.Phase == corev1api.PodRunning {
 log.Info("Preparing data download")
 // we don't expect anyone else update the CR during the Prepare process
 updated, err := r.exclusiveUpdateDataDownload(context.Background(), dd, r.prepareDataDownload)

@ -711,7 +711,7 @@ func (r *DataDownloadReconciler) exclusiveUpdateDataDownload(ctx context.Context
 }
 }

-func (r *DataDownloadReconciler) getTargetPVC(ctx context.Context, dd *velerov2alpha1api.DataDownload) (*v1.PersistentVolumeClaim, error) {
+func (r *DataDownloadReconciler) getTargetPVC(ctx context.Context, dd *velerov2alpha1api.DataDownload) (*corev1api.PersistentVolumeClaim, error) {
 return r.kubeClient.CoreV1().PersistentVolumeClaims(dd.Spec.TargetVolume.Namespace).Get(ctx, dd.Spec.TargetVolume.PVC, metav1.GetOptions{})
 }

@ -772,8 +772,8 @@ func (r *DataDownloadReconciler) setupExposeParam(dd *velerov2alpha1api.DataDown
 }, nil
 }

-func getDataDownloadOwnerObject(dd *velerov2alpha1api.DataDownload) v1.ObjectReference {
-return v1.ObjectReference{
+func getDataDownloadOwnerObject(dd *velerov2alpha1api.DataDownload) corev1api.ObjectReference {
+return corev1api.ObjectReference{
 Kind: dd.Kind,
 Namespace: dd.Namespace,
 Name: dd.Name,

@ -782,7 +782,7 @@ func getDataDownloadOwnerObject(dd *velerov2alpha1api.DataDownload) v1.ObjectRef
 }
 }

-func findDataDownloadByPod(client client.Client, pod v1.Pod) (*velerov2alpha1api.DataDownload, error) {
+func findDataDownloadByPod(client client.Client, pod corev1api.Pod) (*velerov2alpha1api.DataDownload, error) {
 if label, exist := pod.Labels[velerov1api.DataDownloadLabel]; exist {
 dd := &velerov2alpha1api.DataDownload{}
 err := client.Get(context.Background(), types.NamespacedName{
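findSnapshotRestoreForPod and findDataDownloadByPod above implement a common controller-runtime idiom: map a watched Pod back to its owning custom resource through a label, and only act once the pod is running. A generic sketch of that mapping (names are illustrative, not Velero's):

    package sketch

    import (
        corev1api "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/types"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // requestsForPod enqueues a reconcile for the CR named by ownerLabel once
    // the watched pod is running; all other pod events are dropped.
    func requestsForPod(pod *corev1api.Pod, ownerLabel string) []reconcile.Request {
        name, ok := pod.Labels[ownerLabel]
        if !ok || pod.Status.Phase != corev1api.PodRunning {
            return nil
        }
        return []reconcile.Request{{
            NamespacedName: types.NamespacedName{Namespace: pod.Namespace, Name: name},
        }}
    }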
@ -27,8 +27,8 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/require"
-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"

@ -101,7 +101,7 @@ func initDataDownloadReconcilerWithError(objects []runtime.Object, needError ...
 if err != nil {
 return nil, err
 }
-err = corev1.AddToScheme(scheme)
+err = corev1api.AddToScheme(scheme)
 if err != nil {
 return nil, err
 }

@ -142,23 +142,23 @@ func initDataDownloadReconcilerWithError(objects []runtime.Object, needError ...

 dataPathMgr := datapath.NewManager(1)

-return NewDataDownloadReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nodeagent.RestorePVC{}, corev1.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil
+return NewDataDownloadReconciler(fakeClient, nil, fakeKubeClient, dataPathMgr, nodeagent.RestorePVC{}, corev1api.ResourceRequirements{}, "test-node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil
 }

 func TestDataDownloadReconcile(t *testing.T) {
-daemonSet := &appsv1.DaemonSet{
+daemonSet := &appsv1api.DaemonSet{
 ObjectMeta: metav1.ObjectMeta{
 Namespace: "velero",
 Name: "node-agent",
 },
 TypeMeta: metav1.TypeMeta{
 Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
 },
-Spec: appsv1.DaemonSetSpec{
-Template: corev1.PodTemplateSpec{
-Spec: corev1.PodSpec{
-Containers: []corev1.Container{
+Spec: appsv1api.DaemonSetSpec{
+Template: corev1api.PodTemplateSpec{
+Spec: corev1api.PodSpec{
+Containers: []corev1api.Container{
 {
 Image: "fake-image",
 },

@ -173,7 +173,7 @@ func TestDataDownloadReconcile(t *testing.T) {
 tests := []struct {
 name string
 dd *velerov2alpha1api.DataDownload
-targetPVC *corev1.PersistentVolumeClaim
+targetPVC *corev1api.PersistentVolumeClaim
 dataMgr *datapath.Manager
 needErrs []bool
 needCreateFSBR bool

@ -453,7 +453,7 @@ func TestDataDownloadReconcile(t *testing.T) {
 if test.isExposeErr {
 ep.On("Expose", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("Error to expose restore exposer"))
 } else if test.notNilExpose {
-hostingPod := builder.ForPod("test-ns", "test-name").Volumes(&corev1.Volume{Name: "test-pvc"}).Result()
+hostingPod := builder.ForPod("test-ns", "test-name").Volumes(&corev1api.Volume{Name: "test-pvc"}).Result()
 hostingPod.ObjectMeta.SetUID("test-uid")
 ep.On("GetExposed", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: hostingPod, VolumeName: "test-pvc"}}, nil)
 } else if test.isGetExposeErr {

@ -707,13 +707,13 @@ func TestFindDataDownloadForPod(t *testing.T) {
 tests := []struct {
 name string
 du *velerov2alpha1api.DataDownload
-pod *corev1.Pod
+pod *corev1api.Pod
 checkFunc func(*velerov2alpha1api.DataDownload, []reconcile.Request)
 }{
 {
 name: "find dataDownload for pod",
 du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(),
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Labels(map[string]string{velerov1api.DataDownloadLabel: dataDownloadName}).Status(corev1.PodStatus{Phase: corev1.PodRunning}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Labels(map[string]string{velerov1api.DataDownloadLabel: dataDownloadName}).Status(corev1api.PodStatus{Phase: corev1api.PodRunning}).Result(),
 checkFunc: func(du *velerov2alpha1api.DataDownload, requests []reconcile.Request) {
 // Assert that the function returns a single request
 assert.Len(t, requests, 1)

@ -969,27 +969,27 @@ func (dt *ddResumeTestHelper) resumeCancellableDataPath(_ *DataUploadReconciler,
 return dt.resumeErr
 }

-func (dt *ddResumeTestHelper) Expose(context.Context, corev1.ObjectReference, exposer.GenericRestoreExposeParam) error {
+func (dt *ddResumeTestHelper) Expose(context.Context, corev1api.ObjectReference, exposer.GenericRestoreExposeParam) error {
 return nil
 }

-func (dt *ddResumeTestHelper) GetExposed(context.Context, corev1.ObjectReference, kbclient.Client, string, time.Duration) (*exposer.ExposeResult, error) {
+func (dt *ddResumeTestHelper) GetExposed(context.Context, corev1api.ObjectReference, kbclient.Client, string, time.Duration) (*exposer.ExposeResult, error) {
 return dt.exposeResult, dt.getExposeErr
 }

-func (dt *ddResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReference) error {
+func (dt *ddResumeTestHelper) PeekExposed(context.Context, corev1api.ObjectReference) error {
 return nil
 }

-func (dt *ddResumeTestHelper) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
+func (dt *ddResumeTestHelper) DiagnoseExpose(context.Context, corev1api.ObjectReference) string {
 return ""
 }

-func (dt *ddResumeTestHelper) RebindVolume(context.Context, corev1.ObjectReference, string, string, time.Duration) error {
+func (dt *ddResumeTestHelper) RebindVolume(context.Context, corev1api.ObjectReference, string, string, time.Duration) error {
 return nil
 }

-func (dt *ddResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference) {}
+func (dt *ddResumeTestHelper) CleanUp(context.Context, corev1api.ObjectReference) {}

 func (dt *ddResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string,
 datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR {

@ -1153,7 +1153,7 @@ func TestResumeCancellableRestore(t *testing.T) {
 dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,

@ -1166,7 +1166,7 @@ func TestResumeCancellableRestore(t *testing.T) {
 dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,

@ -1180,7 +1180,7 @@ func TestResumeCancellableRestore(t *testing.T) {
 dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,
@ -25,7 +25,7 @@ import (
 snapshotter "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/typed/volumesnapshot/v1"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"

@ -74,7 +74,7 @@ type DataUploadReconciler struct {
 dataPathMgr *datapath.Manager
 loadAffinity *kube.LoadAffinity
 backupPVCConfig map[string]nodeagent.BackupPVC
-podResources corev1.ResourceRequirements
+podResources corev1api.ResourceRequirements
 preparingTimeout time.Duration
 metrics *metrics.ServerMetrics
 }

@ -87,7 +87,7 @@ func NewDataUploadReconciler(
 dataPathMgr *datapath.Manager,
 loadAffinity *kube.LoadAffinity,
 backupPVCConfig map[string]nodeagent.BackupPVC,
-podResources corev1.ResourceRequirements,
+podResources corev1api.ResourceRequirements,
 clock clocks.WithTickerAndDelayedExecution,
 nodeName string,
 preparingTimeout time.Duration,

@ -569,10 +569,10 @@ func (r *DataUploadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 return ctrl.NewControllerManagedBy(mgr).
 For(&velerov2alpha1api.DataUpload{}).
 WatchesRawSource(s).
-Watches(&corev1.Pod{}, kube.EnqueueRequestsFromMapUpdateFunc(r.findDataUploadForPod),
+Watches(&corev1api.Pod{}, kube.EnqueueRequestsFromMapUpdateFunc(r.findDataUploadForPod),
 builder.WithPredicates(predicate.Funcs{
 UpdateFunc: func(ue event.UpdateEvent) bool {
-newObj := ue.ObjectNew.(*corev1.Pod)
+newObj := ue.ObjectNew.(*corev1api.Pod)

 if _, ok := newObj.Labels[velerov1api.DataUploadLabel]; !ok {
 return false

@ -598,7 +598,7 @@ func (r *DataUploadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 }

 func (r *DataUploadReconciler) findDataUploadForPod(ctx context.Context, podObj client.Object) []reconcile.Request {
-pod := podObj.(*corev1.Pod)
+pod := podObj.(*corev1api.Pod)
 du, err := findDataUploadByPod(r.client, *pod)
 log := r.logger.WithFields(logrus.Fields{
 "Backup pod": pod.Name,

@ -619,7 +619,7 @@ func (r *DataUploadReconciler) findDataUploadForPod(ctx context.Context, podObj
 return []reconcile.Request{}
 }

-if pod.Status.Phase == corev1.PodRunning {
+if pod.Status.Phase == corev1api.PodRunning {
 log.Info("Preparing dataupload")
 // we don't expect anyone else update the CR during the Prepare process
 updated, err := r.exclusiveUpdateDataUpload(context.Background(), du, r.prepareDataUpload)

@ -800,7 +800,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 log := r.logger.WithField("dataupload", du.Name)

 if du.Spec.SnapshotType == velerov2alpha1api.SnapshotTypeCSI {
-pvc := &corev1.PersistentVolumeClaim{}
+pvc := &corev1api.PersistentVolumeClaim{}
 err := r.client.Get(context.Background(), types.NamespacedName{
 Namespace: du.Spec.SourceNamespace,
 Name: du.Spec.SourcePVC,

@ -820,7 +820,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 }

 accessMode := exposer.AccessModeFileSystem
-if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1.PersistentVolumeBlock {
+if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1api.PersistentVolumeBlock {
 accessMode = exposer.AccessModeBlock
 }

@ -855,7 +855,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 AccessMode: accessMode,
 OperationTimeout: du.Spec.OperationTimeout.Duration,
 ExposeTimeout: r.preparingTimeout,
-VolumeSize: pvc.Spec.Resources.Requests[corev1.ResourceStorage],
+VolumeSize: pvc.Spec.Resources.Requests[corev1api.ResourceStorage],
 Affinity: r.loadAffinity,
 BackupPVCConfig: r.backupPVCConfig,
 Resources: r.podResources,

@ -876,8 +876,8 @@ func (r *DataUploadReconciler) setupWaitExposePara(du *velerov2alpha1api.DataUpl
 return nil
 }

-func getOwnerObject(du *velerov2alpha1api.DataUpload) corev1.ObjectReference {
-return corev1.ObjectReference{
+func getOwnerObject(du *velerov2alpha1api.DataUpload) corev1api.ObjectReference {
+return corev1api.ObjectReference{
 Kind: du.Kind,
 Namespace: du.Namespace,
 Name: du.Name,

@ -886,7 +886,7 @@ func getOwnerObject(du *velerov2alpha1api.DataUpload) corev1.ObjectReference {
 }
 }

-func findDataUploadByPod(client client.Client, pod corev1.Pod) (*velerov2alpha1api.DataUpload, error) {
+func findDataUploadByPod(client client.Client, pod corev1api.Pod) (*velerov2alpha1api.DataUpload, error) {
 if label, exist := pod.Labels[velerov1api.DataUploadLabel]; exist {
 du := &velerov2alpha1api.DataUpload{}
 err := client.Get(context.Background(), types.NamespacedName{
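The block-mode branch in setupExposeParam reduces to a small predicate; a sketch, assuming the PVC has already been fetched:

    package sketch

    import (
        corev1api "k8s.io/api/core/v1"
    )

    // usesBlockMode reports whether a PVC requests a raw block device. A nil
    // VolumeMode means the Kubernetes default, which is filesystem mode.
    func usesBlockMode(pvc *corev1api.PersistentVolumeClaim) bool {
        return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == corev1api.PersistentVolumeBlock
    }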
@ -31,8 +31,8 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/require"
-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -167,19 +167,19 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 },
 }

-daemonSet := &appsv1.DaemonSet{
+daemonSet := &appsv1api.DaemonSet{
 ObjectMeta: metav1.ObjectMeta{
 Namespace: "velero",
 Name: "node-agent",
 },
 TypeMeta: metav1.TypeMeta{
 Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
 },
-Spec: appsv1.DaemonSetSpec{
-Template: corev1.PodTemplateSpec{
-Spec: corev1.PodSpec{
-Containers: []corev1.Container{
+Spec: appsv1api.DaemonSetSpec{
+Template: corev1api.PodTemplateSpec{
+Spec: corev1api.PodSpec{
+Containers: []corev1api.Container{
 {
 Image: "fake-image",
 },

@ -207,7 +207,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 if err != nil {
 return nil, err
 }
-err = corev1.AddToScheme(scheme)
+err = corev1api.AddToScheme(scheme)
 if err != nil {
 return nil, err
 }

@ -243,7 +243,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 dataPathMgr,
 nil,
 map[string]nodeagent.BackupPVC{},
-corev1.ResourceRequirements{},
+corev1api.ResourceRequirements{},
 testclocks.NewFakeClock(now),
 "test-node",
 time.Minute*5,

@ -272,7 +272,7 @@ type fakeSnapshotExposer struct {
 peekErr error
 }

-func (f *fakeSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.ObjectReference, param any) error {
+func (f *fakeSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.ObjectReference, param any) error {
 du := velerov2alpha1api.DataUpload{}
 err := f.kubeClient.Get(ctx, kbclient.ObjectKey{
 Name: dataUploadName,

@ -289,8 +289,8 @@ func (f *fakeSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obj
 return nil
 }

-func (f *fakeSnapshotExposer) GetExposed(ctx context.Context, du corev1.ObjectReference, tm time.Duration, para any) (*exposer.ExposeResult, error) {
-pod := &corev1.Pod{}
+func (f *fakeSnapshotExposer) GetExposed(ctx context.Context, du corev1api.ObjectReference, tm time.Duration, para any) (*exposer.ExposeResult, error) {
+pod := &corev1api.Pod{}
 err := f.kubeClient.Get(ctx, kbclient.ObjectKey{
 Name: dataUploadName,
 Namespace: velerov1api.DefaultNamespace,

@ -307,15 +307,15 @@ func (f *fakeSnapshotExposer) GetExposed(ctx context.Context, du corev1.ObjectRe
 return &exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: pod, VolumeName: dataUploadName, NodeOS: pNodeOS}}, nil
 }

-func (f *fakeSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
+func (f *fakeSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1api.ObjectReference) error {
 return f.peekErr
 }

-func (f *fakeSnapshotExposer) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
+func (f *fakeSnapshotExposer) DiagnoseExpose(context.Context, corev1api.ObjectReference) string {
 return ""
 }

-func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1.ObjectReference, string, string) {
+func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1api.ObjectReference, string, string) {
 }

 type fakeDataUploadFSBR struct {

@ -348,8 +348,8 @@ func TestReconcile(t *testing.T) {
 tests := []struct {
 name string
 du *velerov2alpha1api.DataUpload
-pod *corev1.Pod
-pvc *corev1.PersistentVolumeClaim
+pod *corev1api.Pod
+pvc *corev1api.PersistentVolumeClaim
 snapshotExposerList map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer
 dataMgr *datapath.Manager
 expectedProcessed bool

@ -394,7 +394,7 @@ func TestReconcile(t *testing.T) {
 {
 name: "Dataupload should be accepted",
 du: dataUploadBuilder().Result(),
-pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
+pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1api.Volume{Name: "test-pvc"}).Result(),
 pvc: builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").Result(),
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(),
 expectedRequeue: ctrl.Result{},

@ -402,7 +402,7 @@ func TestReconcile(t *testing.T) {
 {
 name: "Dataupload should fail to get PVC information",
 du: dataUploadBuilder().Result(),
-pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "wrong-pvc"}).Result(),
+pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1api.Volume{Name: "wrong-pvc"}).Result(),
 expectedProcessed: true,
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),
 expectedRequeue: ctrl.Result{},

@ -411,7 +411,7 @@ func TestReconcile(t *testing.T) {
 {
 name: "Dataupload should fail to get PVC attaching node",
 du: dataUploadBuilder().Result(),
-pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
+pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1api.Volume{Name: "test-pvc"}).Result(),
 pvc: builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").StorageClass("fake-sc").Result(),
 expectedProcessed: true,
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).Result(),

@ -421,7 +421,7 @@ func TestReconcile(t *testing.T) {
 {
 name: "Dataupload should fail because expected node doesn't exist",
 du: dataUploadBuilder().Result(),
-pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1.Volume{Name: "test-pvc"}).Result(),
+pod: builder.ForPod("fake-ns", dataUploadName).Volumes(&corev1api.Volume{Name: "test-pvc"}).Result(),
 pvc: builder.ForPersistentVolumeClaim("fake-ns", "test-pvc").Result(),
 removeNode: true,
 expectedProcessed: true,

@ -437,14 +437,14 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Dataupload prepared should be completed",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Result(),
 expectedRequeue: ctrl.Result{},
 },
 {
 name: "Dataupload should fail if expose returns ambiguous nodeOS",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 ambiguousNodeOS: true,
 expectedProcessed: true,

@ -453,21 +453,21 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Dataupload with not enabled cancel",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(false).Result(),
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Result(),
 expectedRequeue: ctrl.Result{},
 },
 {
 name: "Dataupload should be cancel",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result(),
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseCanceling).Result(),
 expectedRequeue: ctrl.Result{},
 },
 {
 name: "Dataupload should be cancel with match node",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: func() *velerov2alpha1api.DataUpload {
 du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result()
 du.Status.Node = "test-node"

@ -480,7 +480,7 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Dataupload should not be cancel with mismatch node",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: func() *velerov2alpha1api.DataUpload {
 du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result()
 du.Status.Node = "different_node"

@ -493,14 +493,14 @@ func TestReconcile(t *testing.T) {
 {
 name: "runCancelableDataUpload is concurrent limited",
 dataMgr: datapath.NewManager(0),
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(),
 expectedRequeue: ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5},
 },
 {
 name: "data path init error",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 fsBRInitErr: errors.New("fake-data-path-init-error"),
 expectedProcessed: true,

@ -509,7 +509,7 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Unable to update status to in progress for data download",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 needErrs: []bool{false, false, false, true},
 expected: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),

@ -517,7 +517,7 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "data path start error",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).SnapshotType(fakeSnapshotType).Result(),
 fsBRStartErr: errors.New("fake-data-path-start-error"),
 expectedProcessed: true,

@ -538,7 +538,7 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Dataupload with enabled cancel",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: func() *velerov2alpha1api.DataUpload {
 du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).SnapshotType(fakeSnapshotType).Result()
 controllerutil.AddFinalizer(du, DataUploadDownloadFinalizer)

@ -553,7 +553,7 @@ func TestReconcile(t *testing.T) {
 },
 {
 name: "Dataupload with remove finalizer and should not be retrieved",
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1api.Volume{Name: "dataupload-1"}).Result(),
 du: func() *velerov2alpha1api.DataUpload {
 du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseFailed).SnapshotType(fakeSnapshotType).Cancel(true).Result()
 controllerutil.AddFinalizer(du, DataUploadDownloadFinalizer)

@ -812,13 +812,13 @@ func TestFindDataUploadForPod(t *testing.T) {
 tests := []struct {
 name string
 du *velerov2alpha1api.DataUpload
-pod *corev1.Pod
+pod *corev1api.Pod
 checkFunc func(*velerov2alpha1api.DataUpload, []reconcile.Request)
 }{
 {
 name: "find dataUpload for pod",
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(),
-pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Labels(map[string]string{velerov1api.DataUploadLabel: dataUploadName}).Status(corev1.PodStatus{Phase: corev1.PodRunning}).Result(),
+pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Labels(map[string]string{velerov1api.DataUploadLabel: dataUploadName}).Status(corev1api.PodStatus{Phase: corev1api.PodRunning}).Result(),
 checkFunc: func(du *velerov2alpha1api.DataUpload, requests []reconcile.Request) {
 // Assert that the function returns a single request
 assert.Len(t, requests, 1)

@ -1087,23 +1087,23 @@ func (dt *duResumeTestHelper) resumeCancellableDataPath(_ *DataUploadReconciler,
 return dt.resumeErr
 }

-func (dt *duResumeTestHelper) Expose(context.Context, corev1.ObjectReference, any) error {
+func (dt *duResumeTestHelper) Expose(context.Context, corev1api.ObjectReference, any) error {
 return nil
 }

-func (dt *duResumeTestHelper) GetExposed(context.Context, corev1.ObjectReference, time.Duration, any) (*exposer.ExposeResult, error) {
+func (dt *duResumeTestHelper) GetExposed(context.Context, corev1api.ObjectReference, time.Duration, any) (*exposer.ExposeResult, error) {
 return dt.exposeResult, dt.getExposeErr
 }

-func (dt *duResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReference) error {
+func (dt *duResumeTestHelper) PeekExposed(context.Context, corev1api.ObjectReference) error {
 return nil
 }

-func (dt *duResumeTestHelper) DiagnoseExpose(context.Context, corev1.ObjectReference) string {
+func (dt *duResumeTestHelper) DiagnoseExpose(context.Context, corev1api.ObjectReference) string {
 return ""
 }

-func (dt *duResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference, string, string) {}
+func (dt *duResumeTestHelper) CleanUp(context.Context, corev1api.ObjectReference, string, string) {}

 func (dt *duResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string,
 datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR {

@ -1276,7 +1276,7 @@ func TestResumeCancellableBackup(t *testing.T) {
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,

@ -1289,7 +1289,7 @@ func TestResumeCancellableBackup(t *testing.T) {
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,

@ -1303,7 +1303,7 @@ func TestResumeCancellableBackup(t *testing.T) {
 du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(),
 exposeResult: &exposer.ExposeResult{
 ByPod: exposer.ExposeByPod{
-HostingPod: &corev1.Pod{},
+HostingPod: &corev1api.Pod{},
 },
 },
 mockInit: true,
@ -24,7 +24,7 @@ import (

 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"

@ -145,7 +145,7 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 return r.errorOut(ctx, &pvb, err, "error updating PodVolumeBackup status", log)
 }

-var pod corev1.Pod
+var pod corev1api.Pod
 podNamespacedName := client.ObjectKey{
 Namespace: pvb.Spec.Pod.Namespace,
 Name: pvb.Spec.Pod.Name,
@ -24,7 +24,7 @@ import (
 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
 "github.com/sirupsen/logrus"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"

@ -61,7 +61,7 @@ func pvbBuilder() *builder.PodVolumeBackupBuilder {
 func podBuilder() *builder.PodBuilder {
 return builder.
 ForPod(velerov1api.DefaultNamespace, name).
-Volumes(&corev1.Volume{Name: "pvb-1-volume"})
+Volumes(&corev1api.Volume{Name: "pvb-1-volume"})
 }

 func bslBuilder() *builder.BackupStorageLocationBuilder {

@ -126,7 +126,7 @@ func (b *fakeFSBR) Close(ctx context.Context) {
 var _ = Describe("PodVolumeBackup Reconciler", func() {
 type request struct {
 pvb *velerov1api.PodVolumeBackup
-pod *corev1.Pod
+pod *corev1api.Pod
 bsl *velerov1api.BackupStorageLocation
 backupRepo *velerov1api.BackupRepository
 expectedProcessed bool
@ -29,7 +29,7 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/require"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 clocktesting "k8s.io/utils/clock/testing"

@ -790,7 +790,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
 },
 Spec: velerov1api.RestoreSpec{
 BackupName: "backup-1",
-ResourceModifier: &corev1.TypedLocalObjectReference{
+ResourceModifier: &corev1api.TypedLocalObjectReference{
 Kind: resourcemodifiers.ConfigmapRefType,
 Name: "test-configmap",
 },

@ -820,14 +820,14 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
 },
 Spec: velerov1api.RestoreSpec{
 BackupName: "backup-1",
-ResourceModifier: &corev1.TypedLocalObjectReference{
+ResourceModifier: &corev1api.TypedLocalObjectReference{
 Kind: resourcemodifiers.ConfigmapRefType,
 Name: "test-configmap",
 },
 },
 }

-cm1 := &corev1.ConfigMap{
+cm1 := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "test-configmap",
 Namespace: velerov1api.DefaultNamespace,

@ -848,7 +848,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
 },
 Spec: velerov1api.RestoreSpec{
 BackupName: "backup-1",
-ResourceModifier: &corev1.TypedLocalObjectReference{
+ResourceModifier: &corev1api.TypedLocalObjectReference{
 // intentional to ensure case insensitivity works as expected
 Kind: "confIGMaP",
 Name: "test-configmap-invalid",

@ -856,7 +856,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
 },
 }

-invalidVersionCm := &corev1.ConfigMap{
+invalidVersionCm := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "test-configmap-invalid",
 Namespace: velerov1api.DefaultNamespace,

@ -877,14 +877,14 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
 },
 Spec: velerov1api.RestoreSpec{
 BackupName: "backup-1",
-ResourceModifier: &corev1.TypedLocalObjectReference{
+ResourceModifier: &corev1api.TypedLocalObjectReference{
 Kind: resourcemodifiers.ConfigmapRefType,
 Name: "test-configmap-invalid-operator",
 },
 },
 }

-invalidOperatorCm := &corev1.ConfigMap{
+invalidOperatorCm := &corev1api.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "test-configmap-invalid-operator",
 Namespace: velerov1api.DefaultNamespace,
@@ -24,7 +24,7 @@ import (

 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	storagev1api "k8s.io/api/storage/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -340,7 +340,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result

 		err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, ctx.resourceTimeout, true, func(context.Context) (bool, error) {
 			// wait for PVC to be bound
-			pvc := &v1.PersistentVolumeClaim{}
+			pvc := &corev1api.PersistentVolumeClaim{}
 			err := ctx.crClient.Get(context.Background(), client.ObjectKey{Name: volInfo.PVCName, Namespace: restoredNamespace}, pvc)
 			if apierrors.IsNotFound(err) {
 				log.Debug("error not finding PVC")
@@ -364,7 +364,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 			// We are handling a common but specific scenario where a PVC is in a pending state and uses a storage class with
 			// VolumeBindingMode set to WaitForFirstConsumer. In this case, the PV patch step is skipped to avoid
 			// failures due to the PVC not being bound, which could cause a timeout and result in a failed restore.
-			if pvc.Status.Phase == v1.ClaimPending {
+			if pvc.Status.Phase == corev1api.ClaimPending {
 				// check if storage class used has VolumeBindingMode as WaitForFirstConsumer
 				scName := *pvc.Spec.StorageClassName
 				sc := &storagev1api.StorageClass{}
@@ -382,14 +382,14 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 				}
 			}

-			if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
+			if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" {
 				log.Debugf("PVC: %s not ready", pvc.Name)
 				return false, nil
 			}

 			// wait for PV to be bound
 			pvName := pvc.Spec.VolumeName
-			pv := &v1.PersistentVolume{}
+			pv := &corev1api.PersistentVolume{}
 			err = ctx.crClient.Get(context.Background(), client.ObjectKey{Name: pvName}, pv)
 			if apierrors.IsNotFound(err) {
 				log.Debugf("error not finding PV: %s", pvName)
@@ -399,7 +399,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 				return false, err
 			}

-			if pv.Spec.ClaimRef == nil || pv.Status.Phase != v1.VolumeBound {
+			if pv.Spec.ClaimRef == nil || pv.Status.Phase != corev1api.VolumeBound {
 				log.Debugf("PV: %s not ready", pvName)
 				return false, nil
 			}
@@ -414,7 +414,7 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 			if needPatch(pv, volInfo.PVInfo) {
 				updatedPV := pv.DeepCopy()
 				updatedPV.Labels = volInfo.PVInfo.Labels
-				updatedPV.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(volInfo.PVInfo.ReclaimPolicy)
+				updatedPV.Spec.PersistentVolumeReclaimPolicy = corev1api.PersistentVolumeReclaimPolicy(volInfo.PVInfo.ReclaimPolicy)
 				if err := kubeutil.PatchResource(pv, updatedPV, ctx.crClient); err != nil {
 					return false, err
 				}
@@ -443,8 +443,8 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result
 	return errs
 }

-func needPatch(newPV *v1.PersistentVolume, pvInfo *volume.PVInfo) bool {
-	if newPV.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimPolicy(pvInfo.ReclaimPolicy) {
+func needPatch(newPV *corev1api.PersistentVolume, pvInfo *volume.PVInfo) bool {
+	if newPV.Spec.PersistentVolumeReclaimPolicy != corev1api.PersistentVolumeReclaimPolicy(pvInfo.ReclaimPolicy) {
 		return true
 	}

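For readers unfamiliar with the polling loop these hunks touch, here is a self-contained sketch of the same wait-for-bound pattern under the new alias, assuming a controller-runtime client; the function name, interval, and timeout are illustrative, not Velero's:

package example

import (
	"context"
	"time"

	corev1api "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitPVCBound polls until the named PVC reaches Bound with a PV assigned,
// mirroring the shape of the loop in the diff above.
func waitPVCBound(ctx context.Context, c client.Client, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, 10*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pvc := &corev1api.PersistentVolumeClaim{}
			err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: name}, pvc)
			if apierrors.IsNotFound(err) {
				return false, nil // not created yet; keep polling
			}
			if err != nil {
				return false, err
			}
			return pvc.Status.Phase == corev1api.ClaimBound && pvc.Spec.VolumeName != "", nil
		})
}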
@@ -26,7 +26,7 @@ import (

 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
@@ -70,8 +70,8 @@ type microServiceBRWatcher struct {
 	thisPod             string
 	thisContainer       string
 	associatedObject    string
-	eventCh             chan *v1.Event
-	podCh               chan *v1.Pod
+	eventCh             chan *corev1api.Event
+	podCh               chan *corev1api.Pod
 	startedFromEvent    bool
 	terminatedFromEvent bool
 	wgWatcher           sync.WaitGroup
@@ -95,8 +95,8 @@ func newMicroServiceBRWatcher(client client.Client, kubeClient kubernetes.Interf
 		thisPod:          podName,
 		thisContainer:    containerName,
 		associatedObject: associatedObject,
-		eventCh:          make(chan *v1.Event, 10),
-		podCh:            make(chan *v1.Pod, 2),
+		eventCh:          make(chan *corev1api.Event, 10),
+		podCh:            make(chan *corev1api.Pod, 2),
 		wgWatcher:        sync.WaitGroup{},
 		log:              log,
 	}
@@ -105,12 +105,12 @@ func newMicroServiceBRWatcher(client client.Client, kubeClient kubernetes.Interf
 }

 func (ms *microServiceBRWatcher) Init(ctx context.Context, param any) error {
-	eventInformer, err := ms.mgr.GetCache().GetInformer(ctx, &v1.Event{})
+	eventInformer, err := ms.mgr.GetCache().GetInformer(ctx, &corev1api.Event{})
 	if err != nil {
 		return errors.Wrap(err, "error getting event informer")
 	}

-	podInformer, err := ms.mgr.GetCache().GetInformer(ctx, &v1.Pod{})
+	podInformer, err := ms.mgr.GetCache().GetInformer(ctx, &corev1api.Pod{})
 	if err != nil {
 		return errors.Wrap(err, "error getting pod informer")
 	}
@@ -118,7 +118,7 @@ func (ms *microServiceBRWatcher) Init(ctx context.Context, param any) error {
 	eventHandler, err := eventInformer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj any) {
-				evt := obj.(*v1.Event)
+				evt := obj.(*corev1api.Event)
 				if evt.InvolvedObject.Namespace != ms.namespace || evt.InvolvedObject.Name != ms.associatedObject {
 					return
 				}
@@ -126,7 +126,7 @@ func (ms *microServiceBRWatcher) Init(ctx context.Context, param any) error {
 				ms.eventCh <- evt
 			},
 			UpdateFunc: func(_, obj any) {
-				evt := obj.(*v1.Event)
+				evt := obj.(*corev1api.Event)
 				if evt.InvolvedObject.Namespace != ms.namespace || evt.InvolvedObject.Name != ms.associatedObject {
 					return
 				}
@@ -142,12 +142,12 @@ func (ms *microServiceBRWatcher) Init(ctx context.Context, param any) error {
 	podHandler, err := podInformer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: func(_, obj any) {
-				pod := obj.(*v1.Pod)
+				pod := obj.(*corev1api.Pod)
 				if pod.Namespace != ms.namespace || pod.Name != ms.thisPod {
 					return
 				}

-				if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
+				if pod.Status.Phase == corev1api.PodSucceeded || pod.Status.Phase == corev1api.PodFailed {
 					ms.podCh <- pod
 				}
 			},
@@ -230,7 +230,7 @@ func (ms *microServiceBRWatcher) StartRestore(snapshotID string, target AccessPo
 }

 func (ms *microServiceBRWatcher) reEnsureThisPod(ctx context.Context) error {
-	thisPod := &v1.Pod{}
+	thisPod := &corev1api.Pod{}
 	if err := ms.client.Get(ctx, types.NamespacedName{
 		Namespace: ms.namespace,
 		Name:      ms.thisPod,
@@ -238,7 +238,7 @@ func (ms *microServiceBRWatcher) reEnsureThisPod(ctx context.Context) error {
 		return errors.Wrapf(err, "error getting this pod %s", ms.thisPod)
 	}

-	if thisPod.Status.Phase == v1.PodSucceeded || thisPod.Status.Phase == v1.PodFailed {
+	if thisPod.Status.Phase == corev1api.PodSucceeded || thisPod.Status.Phase == corev1api.PodFailed {
 		ms.podCh <- thisPod
 		ms.log.WithField("this pod", ms.thisPod).Infof("This pod comes to terminital status %s before watch start", thisPod.Status.Phase)
 	}
@@ -264,7 +264,7 @@ func (ms *microServiceBRWatcher) startWatch() {
 		ms.wgWatcher.Done()
 	}()

-	var lastPod *v1.Pod
+	var lastPod *corev1api.Pod

 watchLoop:
 	for {
@@ -319,7 +319,7 @@ func (ms *microServiceBRWatcher) startWatch() {

 		logger.Info("Calling callback on data path pod termination")

-		if lastPod.Status.Phase == v1.PodSucceeded {
+		if lastPod.Status.Phase == corev1api.PodSucceeded {
 			result := funcGetResultFromMessage(ms.taskType, terminateMessage, ms.log)
 			ms.callbacks.OnProgress(ms.ctx, ms.namespace, ms.taskName, getCompletionProgressFromResult(ms.taskType, result))
 			ms.callbacks.OnCompleted(ms.ctx, ms.namespace, ms.taskName, result)
@@ -335,7 +335,7 @@ func (ms *microServiceBRWatcher) startWatch() {
 	}()
 }

-func (ms *microServiceBRWatcher) onEvent(evt *v1.Event) {
+func (ms *microServiceBRWatcher) onEvent(evt *corev1api.Event) {
 	switch evt.Reason {
 	case EventReasonStarted:
 		ms.startedFromEvent = true
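The watcher above filters informer callbacks down to one pod and forwards it on a channel once it terminates. A minimal sketch of that handler-registration pattern with plain client-go, assuming a shared informer for core/v1 Pods (names are illustrative):

package example

import (
	corev1api "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// addPodTerminationHandler registers an update handler that forwards the
// watched pod to podCh once it reaches a terminal phase, mirroring the
// microServiceBRWatcher logic in the diff above.
func addPodTerminationHandler(informer cache.SharedInformer, ns, name string, podCh chan<- *corev1api.Pod) (cache.ResourceEventHandlerRegistration, error) {
	return informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(_, obj interface{}) {
			pod, ok := obj.(*corev1api.Pod)
			if !ok || pod.Namespace != ns || pod.Name != name {
				return // not the pod this watcher cares about
			}
			if pod.Status.Phase == corev1api.PodSucceeded || pod.Status.Phase == corev1api.PodFailed {
				podCh <- pod
			}
		},
	})
}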
@@ -29,7 +29,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	kubeclientfake "k8s.io/client-go/kubernetes/fake"
@@ -61,7 +61,7 @@ func TestReEnsureThisPod(t *testing.T) {
 			namespace: "velero",
 			thisPod:   "fake-pod-1",
 			kubeClientObj: []runtime.Object{
-				builder.ForPod("velero", "fake-pod-1").Phase(v1.PodRunning).Result(),
+				builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodRunning).Result(),
 			},
 		},
 		{
@@ -69,7 +69,7 @@ func TestReEnsureThisPod(t *testing.T) {
 			namespace: "velero",
 			thisPod:   "fake-pod-1",
 			kubeClientObj: []runtime.Object{
-				builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+				builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			},
 			expectChan: true,
 		},
@@ -78,7 +78,7 @@ func TestReEnsureThisPod(t *testing.T) {
 			namespace: "velero",
 			thisPod:   "fake-pod-1",
 			kubeClientObj: []runtime.Object{
-				builder.ForPod("velero", "fake-pod-1").Phase(v1.PodFailed).Result(),
+				builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodFailed).Result(),
 			},
 			expectChan: true,
 		},
@@ -87,7 +87,7 @@ func TestReEnsureThisPod(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			scheme := runtime.NewScheme()
-			v1.AddToScheme(scheme)
+			corev1api.AddToScheme(scheme)
 			fakeClientBuilder := fake.NewClientBuilder()
 			fakeClientBuilder = fakeClientBuilder.WithScheme(scheme)

@@ -97,7 +97,7 @@ func TestReEnsureThisPod(t *testing.T) {
 				namespace: test.namespace,
 				thisPod:   test.thisPod,
 				client:    fakeClient,
-				podCh:     make(chan *v1.Pod, 2),
+				podCh:     make(chan *corev1api.Pod, 2),
 				log:       velerotest.NewLogger(),
 			}

@@ -124,7 +124,7 @@ type startWatchFake struct {
 	progress           int
 }

-func (sw *startWatchFake) getPodContainerTerminateMessage(pod *v1.Pod, container string) string {
+func (sw *startWatchFake) getPodContainerTerminateMessage(pod *corev1api.Pod, container string) string {
 	return sw.terminationMessage
 }

@@ -153,7 +153,7 @@ func (sw *startWatchFake) OnProgress(ctx context.Context, namespace string, task
 }

 type insertEvent struct {
-	event *v1.Event
+	event *corev1api.Event
 	after time.Duration
 	delay time.Duration
 }
@@ -166,7 +166,7 @@ func TestStartWatch(t *testing.T) {
 		thisContainer      string
 		terminationMessage string
 		redirectLogErr     error
-		insertPod          *v1.Pod
+		insertPod          *corev1api.Pod
 		insertEventsBefore []insertEvent
 		insertEventsAfter  []insertEvent
 		ctxCancel          bool
@@ -187,16 +187,16 @@ func TestStartWatch(t *testing.T) {
 			name:          "completed with rantional sequence",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCompleted},
+					event: &corev1api.Event{Reason: EventReasonCompleted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 					delay: time.Second,
 				},
 			},
@@ -209,16 +209,16 @@ func TestStartWatch(t *testing.T) {
 			name:          "completed",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCompleted},
+					event: &corev1api.Event{Reason: EventReasonCompleted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 				},
 			},
 			expectStartEvent: true,
@@ -230,16 +230,16 @@ func TestStartWatch(t *testing.T) {
 			name:          "completed with redirect error",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCompleted},
+					event: &corev1api.Event{Reason: EventReasonCompleted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 				},
 			},
 			redirectLogErr: errors.New("fake-error"),
@@ -252,15 +252,15 @@ func TestStartWatch(t *testing.T) {
 			name:          "complete but terminated event not received in time",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 			},
 			insertEventsAfter: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 					after: time.Second * 6,
 				},
 			},
@@ -272,18 +272,18 @@ func TestStartWatch(t *testing.T) {
 			name:          "complete but terminated event not received immediately",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 			},
 			insertEventsAfter: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonCompleted},
+					event: &corev1api.Event{Reason: EventReasonCompleted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 					delay: time.Second,
 				},
 			},
@@ -296,22 +296,22 @@ func TestStartWatch(t *testing.T) {
 			name:          "completed with progress",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodSucceeded).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodSucceeded).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonProgress, Message: "fake-progress-1"},
+					event: &corev1api.Event{Reason: EventReasonProgress, Message: "fake-progress-1"},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonProgress, Message: "fake-progress-2"},
+					event: &corev1api.Event{Reason: EventReasonProgress, Message: "fake-progress-2"},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCompleted},
+					event: &corev1api.Event{Reason: EventReasonCompleted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 					delay: time.Second,
 				},
 			},
@@ -324,16 +324,16 @@ func TestStartWatch(t *testing.T) {
 			name:          "failed",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodFailed).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodFailed).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCancelled},
+					event: &corev1api.Event{Reason: EventReasonCancelled},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 				},
 			},
 			terminationMessage: "fake-termination-message-1",
@@ -345,7 +345,7 @@ func TestStartWatch(t *testing.T) {
 			name:          "pod crash",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodFailed).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodFailed).Result(),
 			terminationMessage: "fake-termination-message-2",
 			expectFail:         true,
 		},
@@ -353,16 +353,16 @@ func TestStartWatch(t *testing.T) {
 			name:          "canceled",
 			thisPod:       "fak-pod-1",
 			thisContainer: "fake-container-1",
-			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(v1.PodFailed).Result(),
+			insertPod:     builder.ForPod("velero", "fake-pod-1").Phase(corev1api.PodFailed).Result(),
 			insertEventsBefore: []insertEvent{
 				{
-					event: &v1.Event{Reason: EventReasonStarted},
+					event: &corev1api.Event{Reason: EventReasonStarted},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonCancelled},
+					event: &corev1api.Event{Reason: EventReasonCancelled},
 				},
 				{
-					event: &v1.Event{Reason: EventReasonStopped},
+					event: &corev1api.Event{Reason: EventReasonStopped},
 				},
 			},
 			terminationMessage: fmt.Sprintf("Failed to init data path service for DataUpload %s: %v", "fake-du-name", errors.New(ErrCancelled)),
@@ -390,8 +390,8 @@ func TestStartWatch(t *testing.T) {
 				namespace:     test.namespace,
 				thisPod:       test.thisPod,
 				thisContainer: test.thisContainer,
-				podCh:         make(chan *v1.Pod, 2),
-				eventCh:       make(chan *v1.Event, 10),
+				podCh:         make(chan *corev1api.Pod, 2),
+				eventCh:       make(chan *corev1api.Event, 10),
 				log:           velerotest.NewLogger(),
 				callbacks: Callbacks{
 					OnCompleted: sw.OnCompleted,
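These tests lean on a fluent fixture builder (builder.ForPod(...).Phase(...).Result()). A minimal sketch of that builder shape, assuming Velero's pkg/builder provides the real implementation; this stripped-down version is illustrative only:

package example

import (
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PodBuilder accumulates fields and hands back the finished Pod via Result.
type PodBuilder struct{ pod *corev1api.Pod }

func ForPod(ns, name string) *PodBuilder {
	return &PodBuilder{pod: &corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
	}}
}

// Phase sets the pod's status phase and returns the builder for chaining.
func (b *PodBuilder) Phase(p corev1api.PodPhase) *PodBuilder {
	b.pod.Status.Phase = p
	return b
}

func (b *PodBuilder) Result() *corev1api.Pod { return b.pod }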
@@ -6,7 +6,7 @@ import (
 	mock "github.com/stretchr/testify/mock"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"

-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	version "k8s.io/apimachinery/pkg/version"
 )
@@ -17,15 +17,15 @@ type Helper struct {
 }

 // APIGroups provides a mock function with given fields:
-func (_m *Helper) APIGroups() []v1.APIGroup {
+func (_m *Helper) APIGroups() []metav1.APIGroup {
 	ret := _m.Called()

-	var r0 []v1.APIGroup
-	if rf, ok := ret.Get(0).(func() []v1.APIGroup); ok {
+	var r0 []metav1.APIGroup
+	if rf, ok := ret.Get(0).(func() []metav1.APIGroup); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]v1.APIGroup)
+			r0 = ret.Get(0).([]metav1.APIGroup)
 		}
 	}

@@ -33,13 +33,13 @@ func (_m *Helper) APIGroups() []v1.APIGroup {
 }

 // KindFor provides a mock function with given fields: input
-func (_m *Helper) KindFor(input schema.GroupVersionKind) (schema.GroupVersionResource, v1.APIResource, error) {
+func (_m *Helper) KindFor(input schema.GroupVersionKind) (schema.GroupVersionResource, metav1.APIResource, error) {
 	ret := _m.Called(input)

 	var r0 schema.GroupVersionResource
-	var r1 v1.APIResource
+	var r1 metav1.APIResource
 	var r2 error
-	if rf, ok := ret.Get(0).(func(schema.GroupVersionKind) (schema.GroupVersionResource, v1.APIResource, error)); ok {
+	if rf, ok := ret.Get(0).(func(schema.GroupVersionKind) (schema.GroupVersionResource, metav1.APIResource, error)); ok {
 		return rf(input)
 	}
 	if rf, ok := ret.Get(0).(func(schema.GroupVersionKind) schema.GroupVersionResource); ok {
@@ -48,10 +48,10 @@ func (_m *Helper) KindFor(input schema.GroupVersionKind) (schema.GroupVersionRes
 		r0 = ret.Get(0).(schema.GroupVersionResource)
 	}

-	if rf, ok := ret.Get(1).(func(schema.GroupVersionKind) v1.APIResource); ok {
+	if rf, ok := ret.Get(1).(func(schema.GroupVersionKind) metav1.APIResource); ok {
 		r1 = rf(input)
 	} else {
-		r1 = ret.Get(1).(v1.APIResource)
+		r1 = ret.Get(1).(metav1.APIResource)
 	}

 	if rf, ok := ret.Get(2).(func(schema.GroupVersionKind) error); ok {
@@ -78,13 +78,13 @@ func (_m *Helper) Refresh() error {
 }

 // ResourceFor provides a mock function with given fields: input
-func (_m *Helper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, v1.APIResource, error) {
+func (_m *Helper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, metav1.APIResource, error) {
 	ret := _m.Called(input)

 	var r0 schema.GroupVersionResource
-	var r1 v1.APIResource
+	var r1 metav1.APIResource
 	var r2 error
-	if rf, ok := ret.Get(0).(func(schema.GroupVersionResource) (schema.GroupVersionResource, v1.APIResource, error)); ok {
+	if rf, ok := ret.Get(0).(func(schema.GroupVersionResource) (schema.GroupVersionResource, metav1.APIResource, error)); ok {
 		return rf(input)
 	}
 	if rf, ok := ret.Get(0).(func(schema.GroupVersionResource) schema.GroupVersionResource); ok {
@@ -93,10 +93,10 @@ func (_m *Helper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVe
 		r0 = ret.Get(0).(schema.GroupVersionResource)
 	}

-	if rf, ok := ret.Get(1).(func(schema.GroupVersionResource) v1.APIResource); ok {
+	if rf, ok := ret.Get(1).(func(schema.GroupVersionResource) metav1.APIResource); ok {
 		r1 = rf(input)
 	} else {
-		r1 = ret.Get(1).(v1.APIResource)
+		r1 = ret.Get(1).(metav1.APIResource)
 	}

 	if rf, ok := ret.Get(2).(func(schema.GroupVersionResource) error); ok {
@@ -109,15 +109,15 @@ func (_m *Helper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVe
 }

 // Resources provides a mock function with given fields:
-func (_m *Helper) Resources() []*v1.APIResourceList {
+func (_m *Helper) Resources() []*metav1.APIResourceList {
 	ret := _m.Called()

-	var r0 []*v1.APIResourceList
-	if rf, ok := ret.Get(0).(func() []*v1.APIResourceList); ok {
+	var r0 []*metav1.APIResourceList
+	if rf, ok := ret.Get(0).(func() []*metav1.APIResourceList); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]*v1.APIResourceList)
+			r0 = ret.Get(0).([]*metav1.APIResourceList)
 		}
 	}

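The file above is a testify autogenerated mock: each method looks up canned return values programmed with On(...).Return(...). A sketch of how such a mock is typically stubbed, assumed to live alongside the mock so it can reuse the file's Helper type and metav1 import (illustrative only, not part of the change):

// exampleStub programs a canned APIGroups response, then exercises the
// mocked method; ret.Get(0) inside APIGroups returns the stored slice.
func exampleStub(h *Helper) []metav1.APIGroup {
	h.On("APIGroups").Return([]metav1.APIGroup{{Name: "apps"}})
	return h.APIGroups()
}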
@@ -25,7 +25,7 @@ import (
 	snapshotter "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/typed/volumesnapshot/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,7 +75,7 @@ type CSISnapshotExposeParam struct {
 	BackupPVCConfig map[string]nodeagent.BackupPVC

 	// Resources defines the resource requirements of the hosting pod
-	Resources corev1.ResourceRequirements
+	Resources corev1api.ResourceRequirements

 	// NodeOS specifies the OS of node that the source volume is attaching
 	NodeOS string
@@ -103,7 +103,7 @@ type csiSnapshotExposer struct {
 	log logrus.FieldLogger
 }

-func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.ObjectReference, param any) error {
+func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1api.ObjectReference, param any) error {
 	csiExposeParam := param.(*CSISnapshotExposeParam)

 	curLog := e.log.WithFields(logrus.Fields{
@@ -236,7 +236,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
 	return nil
 }

-func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.ObjectReference, timeout time.Duration, param any) (*ExposeResult, error) {
+func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1api.ObjectReference, timeout time.Duration, param any) (*ExposeResult, error) {
 	exposeWaitParam := param.(*CSISnapshotExposeWaitParam)

 	backupPodName := ownerObject.Name
@@ -249,7 +249,7 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
 		"owner": ownerObject.Name,
 	})

-	pod := &corev1.Pod{}
+	pod := &corev1api.Pod{}
 	err := exposeWaitParam.NodeClient.Get(ctx, types.NamespacedName{
 		Namespace: ownerObject.Namespace,
 		Name:      backupPodName,
@@ -298,7 +298,7 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
 	}}, nil
 }

-func (e *csiSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
+func (e *csiSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1api.ObjectReference) error {
 	backupPodName := ownerObject.Name

 	curLog := e.log.WithFields(logrus.Fields{
@@ -322,7 +322,7 @@ func (e *csiSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1
 	return nil
 }

-func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1.ObjectReference) string {
+func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1api.ObjectReference) string {
 	backupPodName := ownerObject.Name
 	backupPVCName := ownerObject.Name
 	backupVSName := ownerObject.Name
@@ -388,7 +388,7 @@ func (e *csiSnapshotExposer) DiagnoseExpose(ctx context.Context, ownerObject cor

 const cleanUpTimeout = time.Minute

-func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference, vsName string, sourceNamespace string) {
+func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference, vsName string, sourceNamespace string) {
 	backupPodName := ownerObject.Name
 	backupPVCName := ownerObject.Name
 	backupVSName := ownerObject.Name
@@ -400,18 +400,18 @@ func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.Obj
 	csi.DeleteVolumeSnapshotIfAny(ctx, e.csiSnapshotClient, vsName, sourceNamespace, e.log)
 }

-func getVolumeModeByAccessMode(accessMode string) (corev1.PersistentVolumeMode, error) {
+func getVolumeModeByAccessMode(accessMode string) (corev1api.PersistentVolumeMode, error) {
 	switch accessMode {
 	case AccessModeFileSystem:
-		return corev1.PersistentVolumeFilesystem, nil
+		return corev1api.PersistentVolumeFilesystem, nil
 	case AccessModeBlock:
-		return corev1.PersistentVolumeBlock, nil
+		return corev1api.PersistentVolumeBlock, nil
 	default:
 		return "", errors.Errorf("unsupported access mode %s", accessMode)
 	}
 }

-func (e *csiSnapshotExposer) createBackupVS(ctx context.Context, ownerObject corev1.ObjectReference, snapshotVS *snapshotv1api.VolumeSnapshot) (*snapshotv1api.VolumeSnapshot, error) {
+func (e *csiSnapshotExposer) createBackupVS(ctx context.Context, ownerObject corev1api.ObjectReference, snapshotVS *snapshotv1api.VolumeSnapshot) (*snapshotv1api.VolumeSnapshot, error) {
 	backupVSName := ownerObject.Name
 	backupVSCName := ownerObject.Name

@@ -435,7 +435,7 @@ func (e *csiSnapshotExposer) createBackupVS(ctx context.Context, ownerObject cor
 	return e.csiSnapshotClient.VolumeSnapshots(vs.Namespace).Create(ctx, vs, metav1.CreateOptions{})
 }

-func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject corev1.ObjectReference, snapshotVSC *snapshotv1api.VolumeSnapshotContent, vs *snapshotv1api.VolumeSnapshot) (*snapshotv1api.VolumeSnapshotContent, error) {
+func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject corev1api.ObjectReference, snapshotVSC *snapshotv1api.VolumeSnapshotContent, vs *snapshotv1api.VolumeSnapshot) (*snapshotv1api.VolumeSnapshotContent, error) {
 	backupVSCName := ownerObject.Name

 	vsc := &snapshotv1api.VolumeSnapshotContent{
@@ -444,7 +444,7 @@ func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject co
 			Annotations: snapshotVSC.Annotations,
 		},
 		Spec: snapshotv1api.VolumeSnapshotContentSpec{
-			VolumeSnapshotRef: corev1.ObjectReference{
+			VolumeSnapshotRef: corev1api.ObjectReference{
 				Name:      vs.Name,
 				Namespace: vs.Namespace,
 				UID:       vs.UID,
@@ -462,7 +462,7 @@ func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject co
 	return e.csiSnapshotClient.VolumeSnapshotContents().Create(ctx, vsc, metav1.CreateOptions{})
 }

-func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity, readOnly bool) (*corev1.PersistentVolumeClaim, error) {
+func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1api.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity, readOnly bool) (*corev1api.PersistentVolumeClaim, error) {
 	backupPVCName := ownerObject.Name

 	volumeMode, err := getVolumeModeByAccessMode(accessMode)
@@ -470,19 +470,19 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 		return nil, err
 	}

-	pvcAccessMode := corev1.ReadWriteOnce
+	pvcAccessMode := corev1api.ReadWriteOnce

 	if readOnly {
-		pvcAccessMode = corev1.ReadOnlyMany
+		pvcAccessMode = corev1api.ReadOnlyMany
 	}

-	dataSource := &corev1.TypedLocalObjectReference{
+	dataSource := &corev1api.TypedLocalObjectReference{
 		APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
 		Kind:     "VolumeSnapshot",
 		Name:     backupVS,
 	}

-	pvc := &corev1.PersistentVolumeClaim{
+	pvc := &corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: ownerObject.Namespace,
 			Name:      backupPVCName,
@@ -496,8 +496,8 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 				},
 			},
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			AccessModes: []corev1api.PersistentVolumeAccessMode{
 				pvcAccessMode,
 			},
 			StorageClassName: &storageClass,
@@ -505,9 +505,9 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 			DataSource:       dataSource,
 			DataSourceRef:    nil,

-			Resources: corev1.VolumeResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: resource,
+			Resources: corev1api.VolumeResourceRequirements{
+				Requests: corev1api.ResourceList{
+					corev1api.ResourceStorage: resource,
 				},
 			},
 		},
@@ -523,17 +523,17 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co

 func (e *csiSnapshotExposer) createBackupPod(
 	ctx context.Context,
-	ownerObject corev1.ObjectReference,
-	backupPVC *corev1.PersistentVolumeClaim,
+	ownerObject corev1api.ObjectReference,
+	backupPVC *corev1api.PersistentVolumeClaim,
 	operationTimeout time.Duration,
 	label map[string]string,
 	annotation map[string]string,
 	affinity *kube.LoadAffinity,
-	resources corev1.ResourceRequirements,
+	resources corev1api.ResourceRequirements,
 	backupPVCReadOnly bool,
 	spcNoRelabeling bool,
 	nodeOS string,
-) (*corev1.Pod, error) {
+) (*corev1api.Pod, error) {
 	podName := ownerObject.Name

 	containerName := string(ownerObject.UID)
@@ -548,10 +548,10 @@ func (e *csiSnapshotExposer) createBackupPod(
 	volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, backupPVC.Spec.VolumeMode, backupPVCReadOnly)
 	volumeMounts = append(volumeMounts, podInfo.volumeMounts...)

-	volumes := []corev1.Volume{{
+	volumes := []corev1api.Volume{{
 		Name: volumeName,
-		VolumeSource: corev1.VolumeSource{
-			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+		VolumeSource: corev1api.VolumeSource{
+			PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
 				ClaimName: backupPVC.Name,
 			},
 		},
@@ -568,7 +568,7 @@ func (e *csiSnapshotExposer) createBackupPod(
 	}
 	label[podGroupLabel] = podGroupSnapshot

-	volumeMode := corev1.PersistentVolumeFilesystem
+	volumeMode := corev1api.PersistentVolumeFilesystem
 	if backupPVC.Spec.VolumeMode != nil {
 		volumeMode = *backupPVC.Spec.VolumeMode
 	}
@@ -588,14 +588,14 @@ func (e *csiSnapshotExposer) createBackupPod(
 		affinityList = append(affinityList, affinity)
 	}

-	var securityCtx *corev1.PodSecurityContext
+	var securityCtx *corev1api.PodSecurityContext
 	nodeSelector := map[string]string{}
-	podOS := corev1.PodOS{}
-	toleration := []corev1.Toleration{}
+	podOS := corev1api.PodOS{}
+	toleration := []corev1api.Toleration{}
 	if nodeOS == kube.NodeOSWindows {
 		userID := "ContainerAdministrator"
-		securityCtx = &corev1.PodSecurityContext{
-			WindowsOptions: &corev1.WindowsSecurityContextOptions{
+		securityCtx = &corev1api.PodSecurityContext{
+			WindowsOptions: &corev1api.WindowsSecurityContextOptions{
 				RunAsUserName: &userID,
 			},
 		}
@@ -603,7 +603,7 @@ func (e *csiSnapshotExposer) createBackupPod(
 		nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
 		podOS.Name = kube.NodeOSWindows

-		toleration = append(toleration, corev1.Toleration{
+		toleration = append(toleration, corev1api.Toleration{
 			Key:      "os",
 			Operator: "Equal",
 			Effect:   "NoSchedule",
@@ -611,12 +611,12 @@ func (e *csiSnapshotExposer) createBackupPod(
 		})
 	} else {
 		userID := int64(0)
-		securityCtx = &corev1.PodSecurityContext{
+		securityCtx = &corev1api.PodSecurityContext{
 			RunAsUser: &userID,
 		}

 		if spcNoRelabeling {
-			securityCtx.SELinuxOptions = &corev1.SELinuxOptions{
+			securityCtx.SELinuxOptions = &corev1api.SELinuxOptions{
 				Type: "spc_t",
 			}
 		}
@@ -625,7 +625,7 @@ func (e *csiSnapshotExposer) createBackupPod(
 		podOS.Name = kube.NodeOSLinux
 	}

-	pod := &corev1.Pod{
+	pod := &corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      podName,
 			Namespace: ownerObject.Namespace,
@@ -641,12 +641,12 @@ func (e *csiSnapshotExposer) createBackupPod(
 			Labels:      label,
 			Annotations: annotation,
 		},
-		Spec: corev1.PodSpec{
-			TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
+		Spec: corev1api.PodSpec{
+			TopologySpreadConstraints: []corev1api.TopologySpreadConstraint{
 				{
 					MaxSkew:           1,
 					TopologyKey:       "kubernetes.io/hostname",
-					WhenUnsatisfiable: corev1.ScheduleAnyway,
+					WhenUnsatisfiable: corev1api.ScheduleAnyway,
 					LabelSelector: &metav1.LabelSelector{
 						MatchLabels: map[string]string{
 							podGroupLabel: podGroupSnapshot,
@@ -657,11 +657,11 @@ func (e *csiSnapshotExposer) createBackupPod(
 			NodeSelector: nodeSelector,
 			OS:           &podOS,
 			Affinity:     kube.ToSystemAffinity(affinityList),
-			Containers: []corev1.Container{
+			Containers: []corev1api.Container{
 				{
 					Name:            containerName,
 					Image:           podInfo.image,
-					ImagePullPolicy: corev1.PullNever,
+					ImagePullPolicy: corev1api.PullNever,
 					Command: []string{
 						"/velero",
 						"data-mover",
@@ -678,7 +678,7 @@ func (e *csiSnapshotExposer) createBackupPod(
 			ServiceAccountName:            podInfo.serviceAccount,
 			TerminationGracePeriodSeconds: &gracePeriod,
 			Volumes:                       volumes,
-			RestartPolicy:                 corev1.RestartPolicyNever,
+			RestartPolicy:                 corev1api.RestartPolicyNever,
 			SecurityContext:               securityCtx,
 			Tolerations:                   toleration,
 		},
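One design choice worth calling out from createBackupPVC above: a read-only backup PVC is requested with the ReadOnlyMany access mode, otherwise ReadWriteOnce. A tiny sketch of that decision in isolation (function name is illustrative):

package example

import corev1api "k8s.io/api/core/v1"

// pickAccessMode mirrors the read-only branch in createBackupPVC: a
// read-only backup PVC is requested as ROX so the data mover never
// needs write access; the default is RWO.
func pickAccessMode(readOnly bool) corev1api.PersistentVolumeAccessMode {
	if readOnly {
		return corev1api.ReadOnlyMany
	}
	return corev1api.ReadWriteOnce
}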
@@ -26,8 +26,8 @@ import (
 	snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	appsv1api "k8s.io/api/apps/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -128,19 +128,19 @@ func TestExpose(t *testing.T) {
 		},
 	}

-	daemonSet := &appsv1.DaemonSet{
+	daemonSet := &appsv1api.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "velero",
 			Name:      "node-agent",
 		},
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "DaemonSet",
-			APIVersion: appsv1.SchemeGroupVersion.String(),
+			APIVersion: appsv1api.SchemeGroupVersion.String(),
 		},
-		Spec: appsv1.DaemonSetSpec{
-			Template: corev1.PodTemplateSpec{
-				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{
+		Spec: appsv1api.DaemonSetSpec{
+			Template: corev1api.PodTemplateSpec{
+				Spec: corev1api.PodSpec{
+					Containers: []corev1api.Container{
 						{
 							Name: "node-agent",
 						},
@@ -487,9 +487,9 @@ func TestExpose(t *testing.T) {
 				log:               velerotest.NewLogger(),
 			}

-			var ownerObject corev1.ObjectReference
+			var ownerObject corev1api.ObjectReference
 			if test.ownerBackup != nil {
-				ownerObject = corev1.ObjectReference{
+				ownerObject = corev1api.ObjectReference{
 					Kind:      test.ownerBackup.Kind,
 					Namespace: test.ownerBackup.Namespace,
 					Name:      test.ownerBackup.Name,
@@ -524,15 +524,15 @@ func TestExpose(t *testing.T) {
 				assert.Equal(t, *expectedVSC.Spec.VolumeSnapshotClassName, *vscObj.Spec.VolumeSnapshotClassName)

 				if test.expectedVolumeSize != nil {
-					assert.Equal(t, *test.expectedVolumeSize, backupPVC.Spec.Resources.Requests[corev1.ResourceStorage])
+					assert.Equal(t, *test.expectedVolumeSize, backupPVC.Spec.Resources.Requests[corev1api.ResourceStorage])
 				} else {
-					assert.Equal(t, *resource.NewQuantity(restoreSize, ""), backupPVC.Spec.Resources.Requests[corev1.ResourceStorage])
+					assert.Equal(t, *resource.NewQuantity(restoreSize, ""), backupPVC.Spec.Resources.Requests[corev1api.ResourceStorage])
 				}

 				if test.expectedReadOnlyPVC {
 					gotReadOnlyAccessMode := false
 					for _, accessMode := range backupPVC.Spec.AccessModes {
-						if accessMode == corev1.ReadOnlyMany {
+						if accessMode == corev1api.ReadOnlyMany {
 							gotReadOnlyAccessMode = true
 						}
 					}
@@ -562,13 +562,13 @@ func TestGetExpose(t *testing.T) {
 		},
 	}

-	backupPod := &corev1.Pod{
+	backupPod := &corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: backup.Namespace,
 			Name:      backup.Name,
 		},
-		Spec: corev1.PodSpec{
-			Volumes: []corev1.Volume{
+		Spec: corev1api.PodSpec{
+			Volumes: []corev1api.Volume{
 				{
 					Name: "fake-volume",
 				},
@@ -582,13 +582,13 @@ func TestGetExpose(t *testing.T) {
 		},
 	}

-	backupPodWithoutVolume := &corev1.Pod{
+	backupPodWithoutVolume := &corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: backup.Namespace,
 			Name:      backup.Name,
 		},
-		Spec: corev1.PodSpec{
-			Volumes: []corev1.Volume{
+		Spec: corev1api.PodSpec{
+			Volumes: []corev1api.Volume{
 				{
 					Name: "fake-volume-1",
 				},
@@ -599,24 +599,24 @@ func TestGetExpose(t *testing.T) {
 		},
 	}

-	backupPVC := &corev1.PersistentVolumeClaim{
+	backupPVC := &corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: backup.Namespace,
 			Name:      backup.Name,
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
+		Spec: corev1api.PersistentVolumeClaimSpec{
 			VolumeName: "fake-pv-name",
 		},
 	}

-	backupPV := &corev1.PersistentVolume{
+	backupPV := &corev1api.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "fake-pv-name",
 		},
 	}

 	scheme := runtime.NewScheme()
-	corev1.AddToScheme(scheme)
+	corev1api.AddToScheme(scheme)

 	tests := []struct {
 		name string
@@ -695,9 +695,9 @@ func TestGetExpose(t *testing.T) {
 				log:               velerotest.NewLogger(),
 			}

-			var ownerObject corev1.ObjectReference
+			var ownerObject corev1api.ObjectReference
 			if test.ownerBackup != nil {
-				ownerObject = corev1.ObjectReference{
+				ownerObject = corev1api.ObjectReference{
 					Kind:      test.ownerBackup.Kind,
 					Namespace: test.ownerBackup.Namespace,
 					Name:      test.ownerBackup.Name,
@@ -739,17 +739,17 @@ func TestPeekExpose(t *testing.T) {
 		},
 	}

-	backupPodUrecoverable := &corev1.Pod{
+	backupPodUrecoverable := &corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: backup.Namespace,
 			Name:      backup.Name,
 		},
-		Status: corev1.PodStatus{
-			Phase: corev1.PodFailed,
+		Status: corev1api.PodStatus{
+			Phase: corev1api.PodFailed,
 		},
 	}

-	backupPod := &corev1.Pod{
+	backupPod := &corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: backup.Namespace,
 			Name:      backup.Name,
@@ -757,7 +757,7 @@ func TestPeekExpose(t *testing.T) {
 	}

 	scheme := runtime.NewScheme()
-	corev1.AddToScheme(scheme)
+	corev1api.AddToScheme(scheme)

 	tests := []struct {
 		name string
@@ -795,9 +795,9 @@ func TestPeekExpose(t *testing.T) {
 				log:        velerotest.NewLogger(),
 			}

-			var ownerObject corev1.ObjectReference
+			var ownerObject corev1api.ObjectReference
 			if test.ownerBackup != nil {
-				ownerObject = corev1.ObjectReference{
+				ownerObject = corev1api.ObjectReference{
 					Kind:      test.ownerBackup.Kind,
 					Namespace: test.ownerBackup.Namespace,
 					Name:      test.ownerBackup.Name,
@@ -829,14 +829,14 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
 		},
 	}

-	dataSource := &corev1.TypedLocalObjectReference{
+	dataSource := &corev1api.TypedLocalObjectReference{
 		APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
 		Kind:     "VolumeSnapshot",
 		Name:     "fake-snapshot",
 	}
-	volumeMode := corev1.PersistentVolumeFilesystem
+	volumeMode := corev1api.PersistentVolumeFilesystem

-	backupPVC := corev1.PersistentVolumeClaim{
+	backupPVC := corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -850,23 +850,23 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
 				},
 			},
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadWriteOnce,
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			AccessModes: []corev1api.PersistentVolumeAccessMode{
+				corev1api.ReadWriteOnce,
 			},
 			VolumeMode:       &volumeMode,
 			DataSource:       dataSource,
 			DataSourceRef:    nil,
 			StorageClassName: pointer.String("fake-storage-class"),
-			Resources: corev1.VolumeResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: resource.MustParse("1Gi"),
+			Resources: corev1api.VolumeResourceRequirements{
+				Requests: corev1api.ResourceList{
+					corev1api.ResourceStorage: resource.MustParse("1Gi"),
 				},
 			},
 		},
 	}

-	backupPVCReadOnly := corev1.PersistentVolumeClaim{
+	backupPVCReadOnly := corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -880,17 +880,17 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
 				},
 			},
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadOnlyMany,
+		Spec: corev1api.PersistentVolumeClaimSpec{
+			AccessModes: []corev1api.PersistentVolumeAccessMode{
+				corev1api.ReadOnlyMany,
 			},
 			VolumeMode:       &volumeMode,
 			DataSource:       dataSource,
 			DataSourceRef:    nil,
 			StorageClassName: pointer.String("fake-storage-class"),
-			Resources: corev1.VolumeResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: resource.MustParse("1Gi"),
+			Resources: corev1api.VolumeResourceRequirements{
+				Requests: corev1api.ResourceList{
+					corev1api.ResourceStorage: resource.MustParse("1Gi"),
 				},
 			},
 		},
@@ -906,7 +906,7 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
 		readOnly          bool
 		kubeClientObj     []runtime.Object
 		snapshotClientObj []runtime.Object
-		want              *corev1.PersistentVolumeClaim
+		want              *corev1api.PersistentVolumeClaim
 		wantErr           assert.ErrorAssertionFunc
 	}{
 		{
@@ -941,9 +941,9 @@ func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
 				csiSnapshotClient: fakeSnapshotClient.SnapshotV1(),
 				log:               velerotest.NewLogger(),
 			}
-			var ownerObject corev1.ObjectReference
+			var ownerObject corev1api.ObjectReference
 			if tt.ownerBackup != nil {
-				ownerObject = corev1.ObjectReference{
+				ownerObject = corev1api.ObjectReference{
 					Kind:      tt.ownerBackup.Kind,
 					Namespace: tt.ownerBackup.Namespace,
 					Name:      tt.ownerBackup.Name,
@@ -973,7 +973,7 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		},
 	}

-	backupPodWithoutNodeName := corev1.Pod{
+	backupPodWithoutNodeName := corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -986,19 +986,19 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 				},
 			},
 		},
-		Status: corev1.PodStatus{
-			Phase: corev1.PodPending,
-			Conditions: []corev1.PodCondition{
+		Status: corev1api.PodStatus{
+			Phase: corev1api.PodPending,
+			Conditions: []corev1api.PodCondition{
 				{
-					Type:    corev1.PodInitialized,
-					Status:  corev1.ConditionTrue,
+					Type:    corev1api.PodInitialized,
+					Status:  corev1api.ConditionTrue,
 					Message: "fake-pod-message",
 				},
 			},
 		},
 	}

-	backupPodWithNodeName := corev1.Pod{
+	backupPodWithNodeName := corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -1011,22 +1011,22 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 				},
 			},
 		},
-		Spec: corev1.PodSpec{
+		Spec: corev1api.PodSpec{
 			NodeName: "fake-node",
 		},
-		Status: corev1.PodStatus{
-			Phase: corev1.PodPending,
-			Conditions: []corev1.PodCondition{
+		Status: corev1api.PodStatus{
+			Phase: corev1api.PodPending,
+			Conditions: []corev1api.PodCondition{
 				{
-					Type:    corev1.PodInitialized,
-					Status:  corev1.ConditionTrue,
+					Type:    corev1api.PodInitialized,
+					Status:  corev1api.ConditionTrue,
 					Message: "fake-pod-message",
 				},
 			},
 		},
 	}

-	backupPVCWithoutVolumeName := corev1.PersistentVolumeClaim{
+	backupPVCWithoutVolumeName := corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -1039,12 +1039,12 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 				},
 			},
 		},
-		Status: corev1.PersistentVolumeClaimStatus{
-			Phase: corev1.ClaimPending,
+		Status: corev1api.PersistentVolumeClaimStatus{
+			Phase: corev1api.ClaimPending,
 		},
 	}

-	backupPVCWithVolumeName := corev1.PersistentVolumeClaim{
+	backupPVCWithVolumeName := corev1api.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "fake-backup",
@@ -1057,20 +1057,20 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 				},
 			},
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
+		Spec: corev1api.PersistentVolumeClaimSpec{
 			VolumeName: "fake-pv",
 		},
-		Status: corev1.PersistentVolumeClaimStatus{
-			Phase: corev1.ClaimPending,
+		Status: corev1api.PersistentVolumeClaimStatus{
+			Phase: corev1api.ClaimPending,
 		},
 	}

-	backupPV := corev1.PersistentVolume{
+	backupPV := corev1api.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "fake-pv",
 		},
-		Status: corev1.PersistentVolumeStatus{
-			Phase:   corev1.VolumePending,
+		Status: corev1api.PersistentVolumeStatus{
+			Phase:   corev1api.VolumePending,
 			Message: "fake-pv-message",
 		},
 	}
@@ -1142,17 +1142,17 @@ func Test_csiSnapshotExposer_DiagnoseExpose(t *testing.T) {
 		},
 	}

-	nodeAgentPod := corev1.Pod{
+	nodeAgentPod := corev1api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: velerov1.DefaultNamespace,
 			Name:      "node-agent-pod-1",
 			Labels:    map[string]string{"role": "node-agent"},
 		},
-		Spec: corev1.PodSpec{
+		Spec: corev1api.PodSpec{
 			NodeName: "fake-node",
 		},
-		Status: corev1.PodStatus{
-			Phase: corev1.PodRunning,
+		Status: corev1api.PodStatus{
+			Phase: corev1api.PodRunning,
 		},
 	}

@@ -1334,9 +1334,9 @@ end diagnose CSI exposer`,
 				csiSnapshotClient: fakeSnapshotClient.SnapshotV1(),
 				log:               velerotest.NewLogger(),
 			}
-			var ownerObject corev1.ObjectReference
+			var ownerObject corev1api.ObjectReference
 			if tt.ownerBackup != nil {
-				ownerObject = corev1.ObjectReference{
+				ownerObject = corev1api.ObjectReference{
 					Kind:      tt.ownerBackup.Kind,
 					Namespace: tt.ownerBackup.Namespace,
 					Name:      tt.ownerBackup.Name,
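The test files in this change all follow the same setup: register the core/v1 types on a runtime scheme, then build a fake controller-runtime client seeded with fixture objects. A self-contained sketch of that setup under the new alias (the helper name is illustrative):

package example

import (
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClient mirrors the test setup above: register core/v1 on a
// scheme, then seed the fake client with the given objects.
func newFakeClient(objs ...client.Object) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := corev1api.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), nil
}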
@ -23,7 +23,7 @@ import (
|
|||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -50,7 +50,7 @@ type GenericRestoreExposeParam struct {
|
|||
HostingPodAnnotations map[string]string
|
||||
|
||||
// Resources defines the resource requirements of the hosting pod
|
||||
Resources corev1.ResourceRequirements
|
||||
Resources corev1api.ResourceRequirements
|
||||
|
||||
// ExposeTimeout specifies the timeout for the entire expose process
|
||||
ExposeTimeout time.Duration
|
||||
|
@ -68,27 +68,27 @@ type GenericRestoreExposeParam struct {
|
|||
// GenericRestoreExposer is the interfaces for a generic restore exposer
|
||||
type GenericRestoreExposer interface {
|
||||
// Expose starts the process to a restore expose, the expose process may take long time
|
||||
Expose(context.Context, corev1.ObjectReference, GenericRestoreExposeParam) error
|
||||
Expose(context.Context, corev1api.ObjectReference, GenericRestoreExposeParam) error
|
||||
|
||||
// GetExposed polls the status of the expose.
|
||||
// If the expose is accessible by the current caller, it waits the expose ready and returns the expose result.
|
||||
// Otherwise, it returns nil as the expose result without an error.
|
||||
GetExposed(context.Context, corev1.ObjectReference, client.Client, string, time.Duration) (*ExposeResult, error)
|
||||
GetExposed(context.Context, corev1api.ObjectReference, client.Client, string, time.Duration) (*ExposeResult, error)
|
||||
|
||||
// PeekExposed tests the status of the expose.
|
||||
// If the expose is incomplete but not recoverable, it returns an error.
|
||||
// Otherwise, it returns nil immediately.
|
||||
PeekExposed(context.Context, corev1.ObjectReference) error
|
||||
PeekExposed(context.Context, corev1api.ObjectReference) error
|
||||
|
||||
// DiagnoseExpose generate the diagnostic info when the expose is not finished for a long time.
|
||||
// If it finds any problem, it returns an string about the problem.
|
||||
DiagnoseExpose(context.Context, corev1.ObjectReference) string
|
||||
DiagnoseExpose(context.Context, corev1api.ObjectReference) string
|
||||
|
||||
// RebindVolume unexposes the restored PV and rebind it to the target PVC
|
||||
RebindVolume(context.Context, corev1.ObjectReference, string, string, time.Duration) error
|
||||
RebindVolume(context.Context, corev1api.ObjectReference, string, string, time.Duration) error
|
||||
|
||||
// CleanUp cleans up any objects generated during the restore expose
|
||||
CleanUp(context.Context, corev1.ObjectReference)
|
||||
CleanUp(context.Context, corev1api.ObjectReference)
|
||||
}
|
||||
|
||||
// NewGenericRestoreExposer creates a new instance of generic restore exposer
@@ -104,7 +104,7 @@ type genericRestoreExposer struct {
log logrus.FieldLogger
}

-func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.ObjectReference, param GenericRestoreExposeParam) error {
+func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1api.ObjectReference, param GenericRestoreExposeParam) error {
curLog := e.log.WithFields(logrus.Fields{
"owner": ownerObject.Name,
"target PVC": param.TargetPVCName,
@@ -151,7 +151,7 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.O
return nil
}

-func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject corev1.ObjectReference, nodeClient client.Client, nodeName string, timeout time.Duration) (*ExposeResult, error) {
+func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject corev1api.ObjectReference, nodeClient client.Client, nodeName string, timeout time.Duration) (*ExposeResult, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -163,7 +163,7 @@ func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject core
"node": nodeName,
})

-pod := &corev1.Pod{}
+pod := &corev1api.Pod{}
err := nodeClient.Get(ctx, types.NamespacedName{
Namespace: ownerObject.Namespace,
Name: restorePodName,
@@ -206,7 +206,7 @@ func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject core
}}, nil
}

-func (e *genericRestoreExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
+func (e *genericRestoreExposer) PeekExposed(ctx context.Context, ownerObject corev1api.ObjectReference) error {
restorePodName := ownerObject.Name

curLog := e.log.WithFields(logrus.Fields{
@@ -230,7 +230,7 @@ func (e *genericRestoreExposer) PeekExposed(ctx context.Context, ownerObject cor
return nil
}

-func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1.ObjectReference) string {
+func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject corev1api.ObjectReference) string {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -275,7 +275,7 @@ func (e *genericRestoreExposer) DiagnoseExpose(ctx context.Context, ownerObject
return diag
}

-func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference) {
+func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1api.ObjectReference) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -283,7 +283,7 @@ func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1.
kube.DeletePVAndPVCIfAny(ctx, e.kubeClient.CoreV1(), restorePVCName, ownerObject.Namespace, 0, e.log)
}

-func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1.ObjectReference, targetPVCName string, targetNamespace string, timeout time.Duration) error {
+func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject corev1api.ObjectReference, targetPVCName string, targetNamespace string, timeout time.Duration) error {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -307,7 +307,7 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co

curLog.WithField("restore PV", restorePV.Name).Info("Restore PV is retrieved")

-retained, err := kube.SetPVReclaimPolicy(ctx, e.kubeClient.CoreV1(), restorePV, corev1.PersistentVolumeReclaimRetain)
+retained, err := kube.SetPVReclaimPolicy(ctx, e.kubeClient.CoreV1(), restorePV, corev1api.PersistentVolumeReclaimRetain)
if err != nil {
return errors.Wrapf(err, "error to retain PV %s", restorePV.Name)
}
@@ -376,8 +376,8 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
return nil
}

-func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim,
-operationTimeout time.Duration, label map[string]string, annotation map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeOS string) (*corev1.Pod, error) {
+func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1api.ObjectReference, targetPVC *corev1api.PersistentVolumeClaim,
+operationTimeout time.Duration, label map[string]string, annotation map[string]string, selectedNode string, resources corev1api.ResourceRequirements, nodeOS string) (*corev1api.Pod, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name

@@ -393,10 +393,10 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode, false)
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)

-volumes := []corev1.Volume{{
+volumes := []corev1api.Volume{{
Name: volumeName,
-VolumeSource: corev1.VolumeSource{
-PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: restorePVCName,
},
},
@@ -408,7 +408,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
}
label[podGroupLabel] = podGroupGenericRestore

-volumeMode := corev1.PersistentVolumeFilesystem
+volumeMode := corev1api.PersistentVolumeFilesystem
if targetPVC.Spec.VolumeMode != nil {
volumeMode = *targetPVC.Spec.VolumeMode
}
@@ -423,14 +423,14 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
args = append(args, podInfo.logFormatArgs...)
args = append(args, podInfo.logLevelArgs...)

-var securityCtx *corev1.PodSecurityContext
+var securityCtx *corev1api.PodSecurityContext
nodeSelector := map[string]string{}
-podOS := corev1.PodOS{}
-toleration := []corev1.Toleration{}
+podOS := corev1api.PodOS{}
+toleration := []corev1api.Toleration{}
if nodeOS == kube.NodeOSWindows {
userID := "ContainerAdministrator"
-securityCtx = &corev1.PodSecurityContext{
-WindowsOptions: &corev1.WindowsSecurityContextOptions{
+securityCtx = &corev1api.PodSecurityContext{
+WindowsOptions: &corev1api.WindowsSecurityContextOptions{
RunAsUserName: &userID,
},
}
@@ -438,7 +438,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
nodeSelector[kube.NodeOSLabel] = kube.NodeOSWindows
podOS.Name = kube.NodeOSWindows

-toleration = append(toleration, corev1.Toleration{
+toleration = append(toleration, corev1api.Toleration{
Key: "os",
Operator: "Equal",
Effect: "NoSchedule",
@@ -446,7 +446,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
})
} else {
userID := int64(0)
-securityCtx = &corev1.PodSecurityContext{
+securityCtx = &corev1api.PodSecurityContext{
RunAsUser: &userID,
}

@@ -454,7 +454,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
podOS.Name = kube.NodeOSLinux
}

-pod := &corev1.Pod{
+pod := &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: restorePodName,
Namespace: ownerObject.Namespace,
@@ -470,12 +470,12 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
Labels: label,
Annotations: annotation,
},
-Spec: corev1.PodSpec{
-TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
+Spec: corev1api.PodSpec{
+TopologySpreadConstraints: []corev1api.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "kubernetes.io/hostname",
-WhenUnsatisfiable: corev1.ScheduleAnyway,
+WhenUnsatisfiable: corev1api.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
podGroupLabel: podGroupGenericRestore,
@@ -485,11 +485,11 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
},
NodeSelector: nodeSelector,
OS: &podOS,
-Containers: []corev1.Container{
+Containers: []corev1api.Container{
{
Name: containerName,
Image: podInfo.image,
-ImagePullPolicy: corev1.PullNever,
+ImagePullPolicy: corev1api.PullNever,
Command: []string{
"/velero",
"data-mover",
@@ -507,7 +507,7 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: volumes,
NodeName: selectedNode,
-RestartPolicy: corev1.RestartPolicyNever,
+RestartPolicy: corev1api.RestartPolicyNever,
SecurityContext: securityCtx,
Tolerations: toleration,
},
@@ -516,10 +516,10 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
return e.kubeClient.CoreV1().Pods(ownerObject.Namespace).Create(ctx, pod, metav1.CreateOptions{})
}

-func (e *genericRestoreExposer) createRestorePVC(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim, selectedNode string) (*corev1.PersistentVolumeClaim, error) {
+func (e *genericRestoreExposer) createRestorePVC(ctx context.Context, ownerObject corev1api.ObjectReference, targetPVC *corev1api.PersistentVolumeClaim, selectedNode string) (*corev1api.PersistentVolumeClaim, error) {
restorePVCName := ownerObject.Name

-pvcObj := &corev1.PersistentVolumeClaim{
+pvcObj := &corev1api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: ownerObject.Namespace,
Name: restorePVCName,
@@ -535,7 +535,7 @@ func (e *genericRestoreExposer) createRestorePVC(ctx context.Context, ownerObjec
},
},
},
-Spec: corev1.PersistentVolumeClaimSpec{
+Spec: corev1api.PersistentVolumeClaimSpec{
AccessModes: targetPVC.Spec.AccessModes,
StorageClassName: targetPVC.Spec.StorageClassName,
VolumeMode: targetPVC.Spec.VolumeMode,

@@ -30,7 +30,7 @@ import (
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerotest "github.com/vmware-tanzu/velero/pkg/test"

-appsv1 "k8s.io/api/apps/v1"
+appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
clientTesting "k8s.io/client-go/testing"
)
@@ -65,16 +65,16 @@ func TestRestoreExpose(t *testing.T) {
},
}

-daemonSet := &appsv1.DaemonSet{
+daemonSet := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "velero",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
},
-Spec: appsv1.DaemonSetSpec{
+Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
Spec: corev1api.PodSpec{
Containers: []corev1api.Container{

@@ -22,7 +22,7 @@ import (

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

"github.com/vmware-tanzu/velero/pkg/datapath"
@@ -36,7 +36,7 @@ var getVolumeMode = kube.GetVolumeMode
var singlePathMatch = kube.SinglePathMatch

// GetPodVolumeHostPath returns a path that can be accessed from the host for a given volume of a pod
-func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, volumeName string,
+func GetPodVolumeHostPath(ctx context.Context, pod *corev1api.Pod, volumeName string,
cli ctrlclient.Client, fs filesystem.Interface, log logrus.FieldLogger) (datapath.AccessPoint, error) {
logger := log.WithField("pod name", pod.Name).WithField("pod UID", pod.GetUID()).WithField("volume", volumeName)

@@ -23,7 +23,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@@ -36,16 +36,16 @@ import (
func TestGetPodVolumeHostPath(t *testing.T) {
tests := []struct {
name string
-getVolumeDirFunc func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error)
-getVolumeModeFunc func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error)
+getVolumeDirFunc func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (string, error)
+getVolumeModeFunc func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error)
pathMatchFunc func(string, filesystem.Interface, logrus.FieldLogger) (string, error)
-pod *corev1.Pod
+pod *corev1api.Pod
pvc string
err string
}{
{
name: "get volume dir fail",
-getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error) {
+getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (string, error) {
return "", errors.New("fake-error-1")
},
pod: builder.ForPod(velerov1api.DefaultNamespace, "fake-pod-1").Result(),
@@ -54,10 +54,10 @@ func TestGetPodVolumeHostPath(t *testing.T) {
},
{
name: "single path match fail",
-getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (string, error) {
+getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (string, error) {
return "", nil
},
-getVolumeModeFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error) {
+getVolumeModeFunc: func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (uploader.PersistentVolumeMode, error) {
return uploader.PersistentVolumeFilesystem, nil
},
pathMatchFunc: func(string, filesystem.Interface, logrus.FieldLogger) (string, error) {
@@ -69,7 +69,7 @@ func TestGetPodVolumeHostPath(t *testing.T) {
},
{
name: "get block volume dir success",
-getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) (
+getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1api.Pod, string, ctrlclient.Client) (
string, error) {
return "fake-pvc-1", nil
},

@@ -21,7 +21,7 @@ import (
"strings"

"github.com/pkg/errors"
-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"

"github.com/vmware-tanzu/velero/pkg/nodeagent"
@@ -30,10 +30,10 @@ import (
type inheritedPodInfo struct {
image string
serviceAccount string
-env []v1.EnvVar
-envFrom []v1.EnvFromSource
-volumeMounts []v1.VolumeMount
-volumes []v1.Volume
+env []corev1api.EnvVar
+envFrom []corev1api.EnvFromSource
+volumeMounts []corev1api.VolumeMount
+volumes []corev1api.Volume
logLevelArgs []string
logFormatArgs []string
}

@@ -28,13 +28,13 @@ import (

"github.com/vmware-tanzu/velero/pkg/util/kube"

-appsv1 "k8s.io/api/apps/v1"
-v1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
)

func TestGetInheritedPodInfo(t *testing.T) {
-daemonSet := &appsv1.DaemonSet{
+daemonSet := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
@@ -44,7 +44,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
},
}

-daemonSetWithNoLog := &appsv1.DaemonSet{
+daemonSetWithNoLog := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
@@ -52,14 +52,14 @@ func TestGetInheritedPodInfo(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
-Spec: appsv1.DaemonSetSpec{
-Template: v1.PodTemplateSpec{
-Spec: v1.PodSpec{
-Containers: []v1.Container{
+Spec: appsv1api.DaemonSetSpec{
+Template: corev1api.PodTemplateSpec{
+Spec: corev1api.PodSpec{
+Containers: []corev1api.Container{
{
Name: "container-1",
Image: "image-1",
-Env: []v1.EnvVar{
+Env: []corev1api.EnvVar{
{
Name: "env-1",
Value: "value-1",
@@ -69,23 +69,23 @@ func TestGetInheritedPodInfo(t *testing.T) {
Value: "value-2",
},
},
-EnvFrom: []v1.EnvFromSource{
+EnvFrom: []corev1api.EnvFromSource{
{
-ConfigMapRef: &v1.ConfigMapEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+ConfigMapRef: &corev1api.ConfigMapEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
-SecretRef: &v1.SecretEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+SecretRef: &corev1api.SecretEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
-VolumeMounts: []v1.VolumeMount{
+VolumeMounts: []corev1api.VolumeMount{
{
Name: "volume-1",
},
@@ -95,7 +95,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
},
},
},
-Volumes: []v1.Volume{
+Volumes: []corev1api.Volume{
{
Name: "volume-1",
},
@@ -109,7 +109,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
},
}

-daemonSetWithLog := &appsv1.DaemonSet{
+daemonSetWithLog := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
@@ -117,14 +117,14 @@ func TestGetInheritedPodInfo(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
-Spec: appsv1.DaemonSetSpec{
-Template: v1.PodTemplateSpec{
-Spec: v1.PodSpec{
-Containers: []v1.Container{
+Spec: appsv1api.DaemonSetSpec{
+Template: corev1api.PodTemplateSpec{
+Spec: corev1api.PodSpec{
+Containers: []corev1api.Container{
{
Name: "container-1",
Image: "image-1",
-Env: []v1.EnvVar{
+Env: []corev1api.EnvVar{
{
Name: "env-1",
Value: "value-1",
@@ -134,23 +134,23 @@ func TestGetInheritedPodInfo(t *testing.T) {
Value: "value-2",
},
},
-EnvFrom: []v1.EnvFromSource{
+EnvFrom: []corev1api.EnvFromSource{
{
-ConfigMapRef: &v1.ConfigMapEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+ConfigMapRef: &corev1api.ConfigMapEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
-SecretRef: &v1.SecretEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+SecretRef: &corev1api.SecretEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
-VolumeMounts: []v1.VolumeMount{
+VolumeMounts: []corev1api.VolumeMount{
{
Name: "volume-1",
},
@@ -168,7 +168,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
},
},
},
-Volumes: []v1.Volume{
+Volumes: []corev1api.Volume{
{
Name: "volume-1",
},
@@ -183,7 +183,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
}

scheme := runtime.NewScheme()
-appsv1.AddToScheme(scheme)
+appsv1api.AddToScheme(scheme)

tests := []struct {
name string
@@ -215,7 +215,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
result: inheritedPodInfo{
image: "image-1",
serviceAccount: "sa-1",
-env: []v1.EnvVar{
+env: []corev1api.EnvVar{
{
Name: "env-1",
Value: "value-1",
@@ -225,23 +225,23 @@ func TestGetInheritedPodInfo(t *testing.T) {
Value: "value-2",
},
},
-envFrom: []v1.EnvFromSource{
+envFrom: []corev1api.EnvFromSource{
{
-ConfigMapRef: &v1.ConfigMapEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+ConfigMapRef: &corev1api.ConfigMapEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
-SecretRef: &v1.SecretEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+SecretRef: &corev1api.SecretEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
-volumeMounts: []v1.VolumeMount{
+volumeMounts: []corev1api.VolumeMount{
{
Name: "volume-1",
},
@@ -249,7 +249,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
Name: "volume-2",
},
},
-volumes: []v1.Volume{
+volumes: []corev1api.Volume{
{
Name: "volume-1",
},
@@ -268,7 +268,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
result: inheritedPodInfo{
image: "image-1",
serviceAccount: "sa-1",
-env: []v1.EnvVar{
+env: []corev1api.EnvVar{
{
Name: "env-1",
Value: "value-1",
@@ -278,23 +278,23 @@ func TestGetInheritedPodInfo(t *testing.T) {
Value: "value-2",
},
},
-envFrom: []v1.EnvFromSource{
+envFrom: []corev1api.EnvFromSource{
{
-ConfigMapRef: &v1.ConfigMapEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+ConfigMapRef: &corev1api.ConfigMapEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
-SecretRef: &v1.SecretEnvSource{
-LocalObjectReference: v1.LocalObjectReference{
+SecretRef: &corev1api.SecretEnvSource{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},
},
},
-volumeMounts: []v1.VolumeMount{
+volumeMounts: []corev1api.VolumeMount{
{
Name: "volume-1",
},
@@ -302,7 +302,7 @@ func TestGetInheritedPodInfo(t *testing.T) {
Name: "volume-2",
},
},
-volumes: []v1.Volume{
+volumes: []corev1api.Volume{
{
Name: "volume-1",
},

@@ -13,7 +13,7 @@ import (

time "time"

-v1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
)

// GenericRestoreExposer is an autogenerated mock type for the GenericRestoreExposer type
@@ -22,12 +22,12 @@ type GenericRestoreExposer struct {
}

// CleanUp provides a mock function with given fields: _a0, _a1
-func (_m *GenericRestoreExposer) CleanUp(_a0 context.Context, _a1 v1.ObjectReference) {
+func (_m *GenericRestoreExposer) CleanUp(_a0 context.Context, _a1 corev1api.ObjectReference) {
_m.Called(_a0, _a1)
}

// DiagnoseExpose provides a mock function with given fields: _a0, _a1
-func (_m *GenericRestoreExposer) DiagnoseExpose(_a0 context.Context, _a1 v1.ObjectReference) string {
+func (_m *GenericRestoreExposer) DiagnoseExpose(_a0 context.Context, _a1 corev1api.ObjectReference) string {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
@@ -35,7 +35,7 @@ func (_m *GenericRestoreExposer) DiagnoseExpose(_a0 context.Context, _a1 v1.Obje
}

var r0 string
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference) string); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference) string); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Get(0).(string)
@@ -45,7 +45,7 @@ func (_m *GenericRestoreExposer) DiagnoseExpose(_a0 context.Context, _a1 v1.Obje
}

// Expose provides a mock function with given fields: _a0, _a1, _a2
-func (_m *GenericRestoreExposer) Expose(_a0 context.Context, _a1 v1.ObjectReference, _a2 exposer.GenericRestoreExposeParam) error {
+func (_m *GenericRestoreExposer) Expose(_a0 context.Context, _a1 corev1api.ObjectReference, _a2 exposer.GenericRestoreExposeParam) error {
ret := _m.Called(_a0, _a1, _a2)

if len(ret) == 0 {
@@ -53,7 +53,7 @@ func (_m *GenericRestoreExposer) Expose(_a0 context.Context, _a1 v1.ObjectRefere
}

var r0 error
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference, exposer.GenericRestoreExposeParam) error); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference, exposer.GenericRestoreExposeParam) error); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Error(0)
@@ -63,7 +63,7 @@ func (_m *GenericRestoreExposer) Expose(_a0 context.Context, _a1 v1.ObjectRefere
}

// GetExposed provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4
-func (_m *GenericRestoreExposer) GetExposed(_a0 context.Context, _a1 v1.ObjectReference, _a2 client.Client, _a3 string, _a4 time.Duration) (*exposer.ExposeResult, error) {
+func (_m *GenericRestoreExposer) GetExposed(_a0 context.Context, _a1 corev1api.ObjectReference, _a2 client.Client, _a3 string, _a4 time.Duration) (*exposer.ExposeResult, error) {
ret := _m.Called(_a0, _a1, _a2, _a3, _a4)

if len(ret) == 0 {
@@ -72,10 +72,10 @@ func (_m *GenericRestoreExposer) GetExposed(_a0 context.Context, _a1 v1.ObjectRe

var r0 *exposer.ExposeResult
var r1 error
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference, client.Client, string, time.Duration) (*exposer.ExposeResult, error)); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference, client.Client, string, time.Duration) (*exposer.ExposeResult, error)); ok {
return rf(_a0, _a1, _a2, _a3, _a4)
}
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference, client.Client, string, time.Duration) *exposer.ExposeResult); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference, client.Client, string, time.Duration) *exposer.ExposeResult); ok {
r0 = rf(_a0, _a1, _a2, _a3, _a4)
} else {
if ret.Get(0) != nil {
@@ -83,7 +83,7 @@ func (_m *GenericRestoreExposer) GetExposed(_a0 context.Context, _a1 v1.ObjectRe
}
}

-if rf, ok := ret.Get(1).(func(context.Context, v1.ObjectReference, client.Client, string, time.Duration) error); ok {
+if rf, ok := ret.Get(1).(func(context.Context, corev1api.ObjectReference, client.Client, string, time.Duration) error); ok {
r1 = rf(_a0, _a1, _a2, _a3, _a4)
} else {
r1 = ret.Error(1)
@@ -93,7 +93,7 @@ func (_m *GenericRestoreExposer) GetExposed(_a0 context.Context, _a1 v1.ObjectRe
}

// PeekExposed provides a mock function with given fields: _a0, _a1
-func (_m *GenericRestoreExposer) PeekExposed(_a0 context.Context, _a1 v1.ObjectReference) error {
+func (_m *GenericRestoreExposer) PeekExposed(_a0 context.Context, _a1 corev1api.ObjectReference) error {
ret := _m.Called(_a0, _a1)

if len(ret) == 0 {
@@ -101,7 +101,7 @@ func (_m *GenericRestoreExposer) PeekExposed(_a0 context.Context, _a1 v1.ObjectR
}

var r0 error
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference) error); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
@@ -111,7 +111,7 @@ func (_m *GenericRestoreExposer) PeekExposed(_a0 context.Context, _a1 v1.ObjectR
}

// RebindVolume provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4
-func (_m *GenericRestoreExposer) RebindVolume(_a0 context.Context, _a1 v1.ObjectReference, _a2 string, _a3 string, _a4 time.Duration) error {
+func (_m *GenericRestoreExposer) RebindVolume(_a0 context.Context, _a1 corev1api.ObjectReference, _a2 string, _a3 string, _a4 time.Duration) error {
ret := _m.Called(_a0, _a1, _a2, _a3, _a4)

if len(ret) == 0 {
@@ -119,7 +119,7 @@ func (_m *GenericRestoreExposer) RebindVolume(_a0 context.Context, _a1 v1.Object
}

var r0 error
-if rf, ok := ret.Get(0).(func(context.Context, v1.ObjectReference, string, string, time.Duration) error); ok {
+if rf, ok := ret.Get(0).(func(context.Context, corev1api.ObjectReference, string, string, time.Duration) error); ok {
r0 = rf(_a0, _a1, _a2, _a3, _a4)
} else {
r0 = ret.Error(0)

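A brief, hypothetical sketch of wiring this autogenerated mock up in a test with testify; the expectation arguments and return values are made up for illustration:

    // Hypothetical testify usage; matchers and returns are illustrative.
    exposerMock := new(mocks.GenericRestoreExposer)
    exposerMock.On("PeekExposed", mock.Anything, mock.Anything).Return(nil)
    exposerMock.On("DiagnoseExpose", mock.Anything, mock.Anything).Return("no problem found")
    // ...exercise the code under test, then verify all expectations were met:
    exposerMock.AssertExpectations(t)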
@@ -20,27 +20,27 @@ import (
"context"
"time"

-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
)

// SnapshotExposer is the interface for a snapshot exposer
type SnapshotExposer interface {
// Expose starts the process to expose a snapshot; the expose process may take a long time
-Expose(context.Context, corev1.ObjectReference, any) error
+Expose(context.Context, corev1api.ObjectReference, any) error
// GetExposed polls the status of the expose.
// If the expose is accessible by the current caller, it waits until the expose is ready and returns the expose result.
// Otherwise, it returns nil as the expose result without an error.
-GetExposed(context.Context, corev1.ObjectReference, time.Duration, any) (*ExposeResult, error)
+GetExposed(context.Context, corev1api.ObjectReference, time.Duration, any) (*ExposeResult, error)

// PeekExposed tests the status of the expose.
// If the expose is incomplete but not recoverable, it returns an error.
// Otherwise, it returns nil immediately.
-PeekExposed(context.Context, corev1.ObjectReference) error
+PeekExposed(context.Context, corev1api.ObjectReference) error

// DiagnoseExpose generates the diagnostic info when the expose is not finished for a long time.
// If it finds any problem, it returns a string about the problem.
-DiagnoseExpose(context.Context, corev1.ObjectReference) string
+DiagnoseExpose(context.Context, corev1api.ObjectReference) string

// CleanUp cleans up any objects generated during the snapshot expose
-CleanUp(context.Context, corev1.ObjectReference, string, string)
+CleanUp(context.Context, corev1api.ObjectReference, string, string)
}

@@ -17,7 +17,7 @@ limitations under the License.
package exposer

import (
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
)

const (
@@ -36,7 +36,7 @@ type ExposeResult struct {

// ExposeByPod defines the result for the expose method that a hosting pod is created
type ExposeByPod struct {
-HostingPod *corev1.Pod
+HostingPod *corev1api.Pod
HostingContainer string
VolumeName string
NodeOS *string

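A rough sketch of consuming this result; it assumes the ExposeResult struct (not fully shown in this hunk) carries the ExposeByPod value under a field named ByPod:

    // Assumption: res is an *ExposeResult with a ByPod field of type ExposeByPod.
    if res != nil && res.ByPod.HostingPod != nil {
        log.WithField("pod", res.ByPod.HostingPod.Name).
            WithField("volume", res.ByPod.VolumeName).
            Info("snapshot is exposed via a hosting pod")
    }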
@@ -20,14 +20,14 @@ import (
"fmt"
"strings"

-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/vmware-tanzu/velero/internal/velero"
)

-func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
+func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1api.DaemonSet {
c := &podTemplateConfig{
image: velero.DefaultVeleroImage(),
}
@@ -36,10 +36,10 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
opt(c)
}

-pullPolicy := corev1.PullAlways
+pullPolicy := corev1api.PullAlways
imageParts := strings.Split(c.image, ":")
if len(imageParts) == 2 && imageParts[1] != "latest" {
-pullPolicy = corev1.PullIfNotPresent
+pullPolicy = corev1api.PullIfNotPresent
}

daemonSetArgs := []string{
@@ -55,26 +55,26 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
}

userID := int64(0)
-mountPropagationMode := corev1.MountPropagationHostToContainer
+mountPropagationMode := corev1api.MountPropagationHostToContainer

dsName := "node-agent"
if c.forWindows {
dsName = "node-agent-windows"
}

-daemonSet := &appsv1.DaemonSet{
+daemonSet := &appsv1api.DaemonSet{
ObjectMeta: objectMeta(namespace, dsName),
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
},
-Spec: appsv1.DaemonSetSpec{
+Spec: appsv1api.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": dsName,
},
},
-Template: corev1.PodTemplateSpec{
+Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels(c.labels, map[string]string{
"name": dsName,
@@ -82,36 +82,36 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
}),
Annotations: c.annotations,
},
-Spec: corev1.PodSpec{
+Spec: corev1api.PodSpec{
ServiceAccountName: c.serviceAccountName,
-SecurityContext: &corev1.PodSecurityContext{
+SecurityContext: &corev1api.PodSecurityContext{
RunAsUser: &userID,
},
-Volumes: []corev1.Volume{
+Volumes: []corev1api.Volume{
{
Name: "host-pods",
-VolumeSource: corev1.VolumeSource{
-HostPath: &corev1.HostPathVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+HostPath: &corev1api.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
},
},
},
{
Name: "host-plugins",
-VolumeSource: corev1.VolumeSource{
-HostPath: &corev1.HostPathVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+HostPath: &corev1api.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins",
},
},
},
{
Name: "scratch",
-VolumeSource: corev1.VolumeSource{
-EmptyDir: new(corev1.EmptyDirVolumeSource),
+VolumeSource: corev1api.VolumeSource{
+EmptyDir: new(corev1api.EmptyDirVolumeSource),
},
},
},
-Containers: []corev1.Container{
+Containers: []corev1api.Container{
{
Name: dsName,
Image: c.image,
@@ -121,10 +121,10 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
"/velero",
},
Args: daemonSetArgs,
-SecurityContext: &corev1.SecurityContext{
+SecurityContext: &corev1api.SecurityContext{
Privileged: &c.privilegedNodeAgent,
},
-VolumeMounts: []corev1.VolumeMount{
+VolumeMounts: []corev1api.VolumeMount{
{
Name: "host-pods",
MountPath: "/host_pods",
@@ -140,19 +140,19 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
MountPath: "/scratch",
},
},
-Env: []corev1.EnvVar{
+Env: []corev1api.EnvVar{
{
Name: "NODE_NAME",
-ValueFrom: &corev1.EnvVarSource{
-FieldRef: &corev1.ObjectFieldSelector{
+ValueFrom: &corev1api.EnvVarSource{
+FieldRef: &corev1api.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
{
Name: "VELERO_NAMESPACE",
-ValueFrom: &corev1.EnvVarSource{
-FieldRef: &corev1.ObjectFieldSelector{
+ValueFrom: &corev1api.EnvVarSource{
+FieldRef: &corev1api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
@@ -173,10 +173,10 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
if c.withSecret {
daemonSet.Spec.Template.Spec.Volumes = append(
daemonSet.Spec.Template.Spec.Volumes,
-corev1.Volume{
+corev1api.Volume{
Name: "cloud-credentials",
-VolumeSource: corev1.VolumeSource{
-Secret: &corev1.SecretVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+Secret: &corev1api.SecretVolumeSource{
SecretName: "cloud-credentials",
},
},
@@ -185,13 +185,13 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {

daemonSet.Spec.Template.Spec.Containers[0].VolumeMounts = append(
daemonSet.Spec.Template.Spec.Containers[0].VolumeMounts,
-corev1.VolumeMount{
+corev1api.VolumeMount{
Name: "cloud-credentials",
MountPath: "/credentials",
},
)

-daemonSet.Spec.Template.Spec.Containers[0].Env = append(daemonSet.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{
+daemonSet.Spec.Template.Spec.Containers[0].Env = append(daemonSet.Spec.Template.Spec.Containers[0].Env, []corev1api.EnvVar{
{
Name: "GOOGLE_APPLICATION_CREDENTIALS",
Value: "/credentials/cloud",
@@ -217,10 +217,10 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
"kubernetes.io/os": "windows",
}
-daemonSet.Spec.Template.Spec.OS = &corev1.PodOS{
+daemonSet.Spec.Template.Spec.OS = &corev1api.PodOS{
Name: "windows",
}
-daemonSet.Spec.Template.Spec.Tolerations = []corev1.Toleration{
+daemonSet.Spec.Template.Spec.Tolerations = []corev1api.Toleration{
{
Key: "os",
Operator: "Equal",
@@ -232,7 +232,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
daemonSet.Spec.Template.Spec.NodeSelector = map[string]string{
"kubernetes.io/os": "linux",
}
-daemonSet.Spec.Template.Spec.OS = &corev1.PodOS{
+daemonSet.Spec.Template.Spec.OS = &corev1api.PodOS{
Name: "linux",
}
}

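The DaemonSet builder above follows the functional-options pattern; a short usage sketch with options that appear in this file (the values are illustrative):

    // Illustrative call; the image tag is a placeholder.
    ds := DaemonSet("velero",
        WithImage("velero/velero:v0.11"),
        WithSecret(true),
    )
    // The resulting pull policy is PullIfNotPresent because the
    // image carries a non-"latest" tag, per the logic above.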
@@ -20,7 +20,7 @@ import (
"testing"

"github.com/stretchr/testify/assert"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"
)

func TestDaemonSet(t *testing.T) {
@@ -36,15 +36,15 @@ func TestDaemonSet(t *testing.T) {
assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
assert.Equal(t, "linux", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
assert.Equal(t, "linux", string(ds.Spec.Template.Spec.OS.Name))
-assert.Equal(t, corev1.PodSecurityContext{RunAsUser: &userID}, *ds.Spec.Template.Spec.SecurityContext)
-assert.Equal(t, corev1.SecurityContext{Privileged: &boolFalse}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)
+assert.Equal(t, corev1api.PodSecurityContext{RunAsUser: &userID}, *ds.Spec.Template.Spec.SecurityContext)
+assert.Equal(t, corev1api.SecurityContext{Privileged: &boolFalse}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)

ds = DaemonSet("velero", WithPrivilegedNodeAgent(true))
-assert.Equal(t, corev1.SecurityContext{Privileged: &boolTrue}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)
+assert.Equal(t, corev1api.SecurityContext{Privileged: &boolTrue}, *ds.Spec.Template.Spec.Containers[0].SecurityContext)

ds = DaemonSet("velero", WithImage("velero/velero:v0.11"))
assert.Equal(t, "velero/velero:v0.11", ds.Spec.Template.Spec.Containers[0].Image)
-assert.Equal(t, corev1.PullIfNotPresent, ds.Spec.Template.Spec.Containers[0].ImagePullPolicy)
+assert.Equal(t, corev1api.PullIfNotPresent, ds.Spec.Template.Spec.Containers[0].ImagePullPolicy)

ds = DaemonSet("velero", WithSecret(true))
assert.Len(t, ds.Spec.Template.Spec.Containers[0].Env, 7)
@@ -68,6 +68,6 @@ func TestDaemonSet(t *testing.T) {
assert.Equal(t, "node-agent", ds.Spec.Template.ObjectMeta.Labels["role"])
assert.Equal(t, "windows", ds.Spec.Template.Spec.NodeSelector["kubernetes.io/os"])
assert.Equal(t, "windows", string(ds.Spec.Template.Spec.OS.Name))
-assert.Equal(t, (*corev1.PodSecurityContext)(nil), ds.Spec.Template.Spec.SecurityContext)
-assert.Equal(t, (*corev1.SecurityContext)(nil), ds.Spec.Template.Spec.Containers[0].SecurityContext)
+assert.Equal(t, (*corev1api.PodSecurityContext)(nil), ds.Spec.Template.Spec.SecurityContext)
+assert.Equal(t, (*corev1api.SecurityContext)(nil), ds.Spec.Template.Spec.Containers[0].SecurityContext)
}

@@ -21,8 +21,8 @@ import (
"strings"
"time"

-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/vmware-tanzu/velero/internal/velero"
@@ -34,11 +34,11 @@ type podTemplateOption func(*podTemplateConfig)

type podTemplateConfig struct {
image string
-envVars []corev1.EnvVar
+envVars []corev1api.EnvVar
restoreOnly bool
annotations map[string]string
labels map[string]string
-resources corev1.ResourceRequirements
+resources corev1api.ResourceRequirements
withSecret bool
defaultRepoMaintenanceFrequency time.Duration
garbageCollectionFrequency time.Duration
@@ -81,11 +81,11 @@ func WithLabels(labels map[string]string) podTemplateOption {

func WithEnvFromSecretKey(varName, secret, key string) podTemplateOption {
return func(c *podTemplateConfig) {
-c.envVars = append(c.envVars, corev1.EnvVar{
+c.envVars = append(c.envVars, corev1api.EnvVar{
Name: varName,
-ValueFrom: &corev1.EnvVarSource{
-SecretKeyRef: &corev1.SecretKeySelector{
-LocalObjectReference: corev1.LocalObjectReference{
+ValueFrom: &corev1api.EnvVarSource{
+SecretKeyRef: &corev1api.SecretKeySelector{
+LocalObjectReference: corev1api.LocalObjectReference{
Name: secret,
},
Key: key,
@@ -107,7 +107,7 @@ func WithRestoreOnly(b bool) podTemplateOption {
}
}

-func WithResources(resources corev1.ResourceRequirements) podTemplateOption {
+func WithResources(resources corev1api.ResourceRequirements) podTemplateOption {
return func(c *podTemplateConfig) {
c.resources = resources
}
@@ -226,7 +226,7 @@ func WithForWindows() podTemplateOption {
}
}

-func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment {
+func Deployment(namespace string, opts ...podTemplateOption) *appsv1api.Deployment {
// TODO: Add support for server args
c := &podTemplateConfig{
image: velero.DefaultVeleroImage(),
@@ -236,10 +236,10 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
opt(c)
}

-pullPolicy := corev1.PullAlways
+pullPolicy := corev1api.PullAlways
imageParts := strings.Split(c.image, ":")
if len(imageParts) == 2 && imageParts[1] != "latest" {
-pullPolicy = corev1.PullIfNotPresent
+pullPolicy = corev1api.PullIfNotPresent
}

args := []string{"server"}
@@ -315,29 +315,29 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
args = append(args, fmt.Sprintf("--item-block-worker-count=%d", c.itemBlockWorkerCount))
}

-deployment := &appsv1.Deployment{
+deployment := &appsv1api.Deployment{
ObjectMeta: objectMeta(namespace, "velero"),
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
-APIVersion: appsv1.SchemeGroupVersion.String(),
+APIVersion: appsv1api.SchemeGroupVersion.String(),
},
-Spec: appsv1.DeploymentSpec{
+Spec: appsv1api.DeploymentSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"deploy": "velero"}},
-Template: corev1.PodTemplateSpec{
+Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels(c.labels, map[string]string{"deploy": "velero"}),
Annotations: podAnnotations(c.annotations),
},
-Spec: corev1.PodSpec{
-RestartPolicy: corev1.RestartPolicyAlways,
+Spec: corev1api.PodSpec{
+RestartPolicy: corev1api.RestartPolicyAlways,
ServiceAccountName: c.serviceAccountName,
NodeSelector: map[string]string{
"kubernetes.io/os": "linux",
},
-OS: &corev1.PodOS{
+OS: &corev1api.PodOS{
Name: "linux",
},
-Containers: []corev1.Container{
+Containers: []corev1api.Container{
{
Name: "velero",
Image: c.image,
@@ -347,7 +347,7 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
"/velero",
},
Args: args,
-VolumeMounts: []corev1.VolumeMount{
+VolumeMounts: []corev1api.VolumeMount{
{
Name: "plugins",
MountPath: "/plugins",
@@ -357,15 +357,15 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
MountPath: "/scratch",
},
},
-Env: []corev1.EnvVar{
+Env: []corev1api.EnvVar{
{
Name: "VELERO_SCRATCH_DIR",
Value: "/scratch",
},
{
Name: "VELERO_NAMESPACE",
-ValueFrom: &corev1.EnvVarSource{
-FieldRef: &corev1.ObjectFieldSelector{
+ValueFrom: &corev1api.EnvVarSource{
+FieldRef: &corev1api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
@@ -378,17 +378,17 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
Resources: c.resources,
},
},
-Volumes: []corev1.Volume{
+Volumes: []corev1api.Volume{
{
Name: "plugins",
-VolumeSource: corev1.VolumeSource{
-EmptyDir: &corev1.EmptyDirVolumeSource{},
+VolumeSource: corev1api.VolumeSource{
+EmptyDir: &corev1api.EmptyDirVolumeSource{},
},
},
{
Name: "scratch",
-VolumeSource: corev1.VolumeSource{
-EmptyDir: new(corev1.EmptyDirVolumeSource),
+VolumeSource: corev1api.VolumeSource{
+EmptyDir: new(corev1api.EmptyDirVolumeSource),
},
},
},
@@ -400,10 +400,10 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
if c.withSecret {
deployment.Spec.Template.Spec.Volumes = append(
deployment.Spec.Template.Spec.Volumes,
-corev1.Volume{
+corev1api.Volume{
Name: "cloud-credentials",
-VolumeSource: corev1.VolumeSource{
-Secret: &corev1.SecretVolumeSource{
+VolumeSource: corev1api.VolumeSource{
+Secret: &corev1api.SecretVolumeSource{
SecretName: "cloud-credentials",
},
},
@@ -412,13 +412,13 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment

deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts,
-corev1.VolumeMount{
+corev1api.VolumeMount{
Name: "cloud-credentials",
MountPath: "/credentials",
},
)

-deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{
+deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, []corev1api.EnvVar{
{
Name: "GOOGLE_APPLICATION_CREDENTIALS",
Value: "/credentials/cloud",

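Deployment composes the same option funcs; a hedged usage sketch (the env var, secret, and key names below are placeholders, not values from this change):

    // Illustrative call; option funcs are defined in this file.
    dep := Deployment("velero",
        WithRestoreOnly(true),
        WithEnvFromSecretKey("AWS_SECRET_ACCESS_KEY", "cloud-credentials", "secret-key"),
    )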
@@ -21,7 +21,7 @@ import (
"time"

"github.com/stretchr/testify/assert"
-corev1 "k8s.io/api/core/v1"
+corev1api "k8s.io/api/core/v1"

"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@@ -42,7 +42,7 @@ func TestDeployment(t *testing.T) {

deploy = Deployment("velero", WithImage("velero/velero:v0.11"))
assert.Equal(t, "velero/velero:v0.11", deploy.Spec.Template.Spec.Containers[0].Image)
-assert.Equal(t, corev1.PullIfNotPresent, deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy)
+assert.Equal(t, corev1api.PullIfNotPresent, deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy)

deploy = Deployment("velero", WithSecret(true))
assert.Len(t, deploy.Spec.Template.Spec.Containers[0].Env, 7)

@@ -24,8 +24,8 @@ import (
"time"

"github.com/pkg/errors"
-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -152,11 +152,11 @@ func crdsAreReady(kbClient kbclient.Client, crds []*unstructured.Unstructured) (
return true, nil
}

-func isAvailable(c appsv1.DeploymentCondition) bool {
+func isAvailable(c appsv1api.DeploymentCondition) bool {
// Make sure that the deployment has been available for at least 10 seconds.
// This is because the deployment can show as Ready momentarily before the pods fall into a CrashLoopBackOff.
// See podutils.IsPodAvailable upstream for similar logic with pods
-if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
+if c.Type == appsv1api.DeploymentAvailable && c.Status == corev1api.ConditionTrue {
if !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(10*time.Second).Before(time.Now()) {
return true
}
@@ -166,7 +166,7 @@ func isAvailable(c appsv1.DeploymentCondition) bool {

// DeploymentIsReady will poll the Kubernetes API server to see if the velero deployment is ready to service user requests.
func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
-gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "Deployment")
+gvk := schema.FromAPIVersionAndKind(appsv1api.SchemeGroupVersion.String(), "Deployment")
apiResource := metav1.APIResource{
Name: "deployments",
Namespaced: true,
@@ -186,7 +186,7 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e
return false, errors.Wrap(err, "error waiting for deployment to be ready")
}

-deploy := new(appsv1.Deployment)
+deploy := new(appsv1api.Deployment)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredDeployment.Object, deploy); err != nil {
return false, errors.Wrap(err, "error converting deployment from unstructured")
}
@@ -219,7 +219,7 @@ func NodeAgentWindowsIsReady(factory client.DynamicFactory, namespace string) (b
}

func daemonSetIsReady(factory client.DynamicFactory, namespace string, name string) (bool, error) {
-gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "DaemonSet")
+gvk := schema.FromAPIVersionAndKind(appsv1api.SchemeGroupVersion.String(), "DaemonSet")
apiResource := metav1.APIResource{
Name: "daemonsets",
Namespaced: true,
@@ -242,7 +242,7 @@ func daemonSetIsReady(factory client.DynamicFactory, namespace string, name stri
return false, errors.Wrap(err, "error waiting for daemonset to be ready")
}

-daemonSet := new(appsv1.DaemonSet)
+daemonSet := new(appsv1api.DaemonSet)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredDaemonSet.Object, daemonSet); err != nil {
return false, errors.Wrap(err, "error converting daemonset from unstructured")
}

@@ -8,11 +8,11 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
-appsv1 "k8s.io/api/apps/v1"
-corev1 "k8s.io/api/core/v1"
+appsv1api "k8s.io/api/apps/v1"
+corev1api "k8s.io/api/core/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -30,7 +30,7 @@ func TestInstall(t *testing.T) {

c := fake.NewClientBuilder().WithObjects(
&apiextv1.CustomResourceDefinition{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
Name: "backuprepositories.velero.io",
},

@@ -59,7 +59,7 @@ func TestInstall(t *testing.T) {
func Test_crdsAreReady(t *testing.T) {
c := fake.NewClientBuilder().WithObjects(
&apiextv1beta1.CustomResourceDefinition{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
Name: "backuprepositories.velero.io",
},

@@ -79,11 +79,11 @@ func Test_crdsAreReady(t *testing.T) {
).Build()

crd := &apiextv1beta1.CustomResourceDefinition{
-TypeMeta: v1.TypeMeta{
+TypeMeta: metav1.TypeMeta{
Kind: "CustomResourceDefinition",
APIVersion: "v1beta1",
},
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
Name: "backuprepositories.velero.io",
},
}
@@ -102,13 +102,13 @@ func Test_crdsAreReady(t *testing.T) {
}

func TestDeploymentIsReady(t *testing.T) {
-deployment := &appsv1.Deployment{
-Status: appsv1.DeploymentStatus{
-Conditions: []appsv1.DeploymentCondition{
+deployment := &appsv1api.Deployment{
+Status: appsv1api.DeploymentStatus{
+Conditions: []appsv1api.DeploymentCondition{
{
-Type: appsv1.DeploymentAvailable,
-Status: corev1.ConditionTrue,
-LastTransitionTime: v1.NewTime(time.Now().Add(-15 * time.Second)),
+Type: appsv1api.DeploymentAvailable,
+Status: corev1api.ConditionTrue,
+LastTransitionTime: metav1.NewTime(time.Now().Add(-15 * time.Second)),
},
},
},
@@ -128,8 +128,8 @@ func TestDeploymentIsReady(t *testing.T) {
}

func TestNodeAgentIsReady(t *testing.T) {
-daemonset := &appsv1.DaemonSet{
-Status: appsv1.DaemonSetStatus{
+daemonset := &appsv1api.DaemonSet{
+Status: appsv1api.DaemonSetStatus{
NumberAvailable: 1,
DesiredNumberScheduled: 1,
},
@@ -149,8 +149,8 @@ func TestNodeAgentIsReady(t *testing.T) {
}

func TestNodeAgentWindowsIsReady(t *testing.T) {
-daemonset := &appsv1.DaemonSet{
-Status: appsv1.DaemonSetStatus{
+daemonset := &appsv1api.DaemonSet{
+Status: appsv1api.DaemonSetStatus{
NumberAvailable: 0,
DesiredNumberScheduled: 0,
},

@@ -20,7 +20,7 @@ import (
"fmt"
"time"

corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

@@ -92,8 +92,8 @@ func podAnnotations(userAnnotations map[string]string) map[string]string {
return base
}

func containerPorts() []corev1.ContainerPort {
return []corev1.ContainerPort{
func containerPorts() []corev1api.ContainerPort {
return []corev1api.ContainerPort{
{
Name: "metrics",
ContainerPort: 8085,

@@ -109,14 +109,14 @@ func objectMeta(namespace, name string) metav1.ObjectMeta {
}
}

func ServiceAccount(namespace string, annotations map[string]string) *corev1.ServiceAccount {
func ServiceAccount(namespace string, annotations map[string]string) *corev1api.ServiceAccount {
objMeta := objectMeta(namespace, defaultServiceAccountName)
objMeta.Annotations = annotations
return &corev1.ServiceAccount{
return &corev1api.ServiceAccount{
ObjectMeta: objMeta,
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
APIVersion: corev1.SchemeGroupVersion.String(),
APIVersion: corev1api.SchemeGroupVersion.String(),
},
}
}

@@ -149,12 +149,12 @@ func ClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return crb
}

func Namespace(namespace string) *corev1.Namespace {
ns := &corev1.Namespace{
func Namespace(namespace string) *corev1api.Namespace {
ns := &corev1api.Namespace{
ObjectMeta: objectMeta("", namespace),
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
APIVersion: corev1.SchemeGroupVersion.String(),
APIVersion: corev1api.SchemeGroupVersion.String(),
},
}

@@ -204,17 +204,17 @@ func VolumeSnapshotLocation(namespace, provider string, config map[string]string
}
}

func Secret(namespace string, data []byte) *corev1.Secret {
return &corev1.Secret{
func Secret(namespace string, data []byte) *corev1api.Secret {
return &corev1api.Secret{
ObjectMeta: objectMeta(namespace, "cloud-credentials"),
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
APIVersion: corev1api.SchemeGroupVersion.String(),
},
Data: map[string][]byte{
"cloud": data,
},
Type: corev1.SecretTypeOpaque,
Type: corev1api.SecretTypeOpaque,
}
}

@@ -241,8 +241,8 @@ type VeleroOptions struct {
PodLabels map[string]string
ServiceAccountAnnotations map[string]string
ServiceAccountName string
VeleroPodResources corev1.ResourceRequirements
NodeAgentPodResources corev1.ResourceRequirements
VeleroPodResources corev1api.ResourceRequirements
NodeAgentPodResources corev1api.ResourceRequirements
SecretData []byte
RestoreOnly bool
UseNodeAgent bool

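For context on the renamed `VeleroPodResources`/`NodeAgentPodResources` fields: a hedged sketch of how a caller might populate a `corev1api.ResourceRequirements` value, assuming the standard apimachinery `resource.MustParse` helper; the function name and all quantities are placeholders, not values from this commit:

package demo

import (
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// veleroPodResources builds a ResourceRequirements value of the kind that
// could be assigned to VeleroOptions.VeleroPodResources.
func veleroPodResources() corev1api.ResourceRequirements {
	return corev1api.ResourceRequirements{
		Requests: corev1api.ResourceList{
			corev1api.ResourceCPU:    resource.MustParse("500m"),
			corev1api.ResourceMemory: resource.MustParse("128Mi"),
		},
		Limits: corev1api.ResourceList{
			corev1api.ResourceCPU:    resource.MustParse("1"),
			corev1api.ResourceMemory: resource.MustParse("512Mi"),
		},
	}
}
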
@@ -23,7 +23,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
rbac "k8s.io/api/rbac/v1"
rbacv1 "k8s.io/api/rbac/v1"
rbacbeta "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -34,7 +34,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/actionhelpers"
)

func newV1ClusterRoleBindingList(rbacCRBList []rbac.ClusterRoleBinding) []actionhelpers.ClusterRoleBinding {
func newV1ClusterRoleBindingList(rbacCRBList []rbacv1.ClusterRoleBinding) []actionhelpers.ClusterRoleBinding {
var crbs []actionhelpers.ClusterRoleBinding
for _, c := range rbacCRBList {
crbs = append(crbs, actionhelpers.V1ClusterRoleBinding{Crb: c})

@@ -53,7 +53,7 @@ func newV1beta1ClusterRoleBindingList(rbacCRBList []rbacbeta.ClusterRoleBinding)
}

type FakeV1ClusterRoleBindingLister struct {
v1crbs []rbac.ClusterRoleBinding
v1crbs []rbacv1.ClusterRoleBinding
}

func (f FakeV1ClusterRoleBindingLister) List() ([]actionhelpers.ClusterRoleBinding, error) {

@@ -98,17 +98,17 @@ func TestNewServiceAccountAction(t *testing.T) {
}{
{
name: "rbac v1 API instantiates an saAction",
version: rbac.SchemeGroupVersion.Version,
version: rbacv1.SchemeGroupVersion.Version,
expectedCRBs: []actionhelpers.ClusterRoleBinding{
actionhelpers.V1ClusterRoleBinding{
Crb: rbac.ClusterRoleBinding{
Crb: rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",
},
},
},
actionhelpers.V1ClusterRoleBinding{
Crb: rbac.ClusterRoleBinding{
Crb: rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-2",
},

@@ -146,7 +146,7 @@ func TestNewServiceAccountAction(t *testing.T) {
discoveryHelper := velerotest.FakeDiscoveryHelper{}
logger := velerotest.NewLogger()

v1crbs := []rbac.ClusterRoleBinding{
v1crbs := []rbacv1.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",

@@ -173,7 +173,7 @@ func TestNewServiceAccountAction(t *testing.T) {
}

clusterRoleBindingListers := map[string]actionhelpers.ClusterRoleBindingLister{
rbac.SchemeGroupVersion.Version: FakeV1ClusterRoleBindingLister{v1crbs: v1crbs},
rbacv1.SchemeGroupVersion.Version: FakeV1ClusterRoleBindingLister{v1crbs: v1crbs},
rbacbeta.SchemeGroupVersion.Version: FakeV1beta1ClusterRoleBindingLister{v1beta1crbs: v1beta1crbs},
"": actionhelpers.NoopClusterRoleBindingLister{},
}

@@ -183,7 +183,7 @@ func TestNewServiceAccountAction(t *testing.T) {
// We only care about the preferred version, nothing else in the list
discoveryHelper.APIGroupsList = []metav1.APIGroup{
{
Name: rbac.GroupName,
Name: rbacv1.GroupName,
PreferredVersion: metav1.GroupVersionForDiscovery{
Version: test.version,
},

@@ -200,7 +200,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
tests := []struct {
name string
serviceAccount runtime.Unstructured
crbs []rbac.ClusterRoleBinding
crbs []rbacv1.ClusterRoleBinding
expectedAdditionalItems []velero.ResourceIdentifier
}{
{

@@ -230,9 +230,9 @@ func TestServiceAccountActionExecute(t *testing.T) {
}
}
`),
crbs: []rbac.ClusterRoleBinding{
crbs: []rbacv1.ClusterRoleBinding{
{
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",

@@ -244,17 +244,17 @@ func TestServiceAccountActionExecute(t *testing.T) {
Name: "velero",
},
{
Kind: rbac.ServiceAccountKind,
Kind: rbacv1.ServiceAccountKind,
Namespace: "non-matching-ns",
Name: "velero",
},
{
Kind: rbac.ServiceAccountKind,
Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
Name: "role",
},
},

@@ -273,19 +273,19 @@ func TestServiceAccountActionExecute(t *testing.T) {
}
}
`),
crbs: []rbac.ClusterRoleBinding{
crbs: []rbacv1.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-1",
},
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
Name: "role-1",
},
},

@@ -293,19 +293,19 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-2",
},
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
Kind: rbac.ServiceAccountKind,
Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
Name: "role-2",
},
},

@@ -313,14 +313,14 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-3",
},
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
Kind: rbac.ServiceAccountKind,
Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
Name: "role-3",
},
},

@@ -328,9 +328,9 @@ func TestServiceAccountActionExecute(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "crb-4",
},
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
Kind: rbac.ServiceAccountKind,
Kind: rbacv1.ServiceAccountKind,
Namespace: "velero",
Name: "velero",
},

@@ -340,7 +340,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
Name: "role-4",
},
},

@@ -22,7 +22,7 @@ import (
"fmt"

"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

@@ -137,7 +137,7 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
return errors.New("node name is empty")
}

pods := new(v1.PodList)
pods := new(corev1api.PodList)
parsedSelector, err := labels.Parse(fmt.Sprintf("role=%s", nodeAgentRole))
if err != nil {
return errors.Wrap(err, "fail to parse selector")

@@ -166,7 +166,7 @@ func isRunningInNode(ctx context.Context, namespace string, nodeName string, crC
return errors.Errorf("daemonset pod not found in running state in node %s", nodeName)
}

func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string, osType string) (*v1.PodSpec, error) {
func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace string, osType string) (*corev1api.PodSpec, error) {
dsName := daemonSet
if osType == kube.NodeOSWindows {
dsName = daemonsetWindows

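The `GetPodSpec` change above only touches the return type; the lookup itself stays the usual typed-client read. A sketch of the equivalent flow under the new alias, assuming a client-go `kubernetes.Interface` (the helper name is invented):

package demo

import (
	"context"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// daemonSetPodSpec fetches a DaemonSet and returns its pod template spec,
// which is a corev1api.PodSpec under the canonical alias.
func daemonSetPodSpec(ctx context.Context, kc kubernetes.Interface, ns, name string) (*corev1api.PodSpec, error) {
	ds, err := kc.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	spec := ds.Spec.Template.Spec
	return &spec, nil
}
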
@@ -22,8 +22,8 @@ import (

"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"

@@ -41,7 +41,7 @@ type reactor struct {
}

func TestIsRunning(t *testing.T) {
ds := &appsv1.DaemonSet{
ds := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -106,15 +106,15 @@ func TestIsRunning(t *testing.T) {

func TestIsRunningInNode(t *testing.T) {
scheme := runtime.NewScheme()
corev1.AddToScheme(scheme)
corev1api.AddToScheme(scheme)

nonNodeAgentPod := builder.ForPod("fake-ns", "fake-pod").Result()
nodeAgentPodNotRunning := builder.ForPod("fake-ns", "fake-pod").Labels(map[string]string{"role": "node-agent"}).Result()
nodeAgentPodRunning1 := builder.ForPod("fake-ns", "fake-pod-1").Labels(map[string]string{"role": "node-agent"}).Phase(corev1.PodRunning).Result()
nodeAgentPodRunning2 := builder.ForPod("fake-ns", "fake-pod-2").Labels(map[string]string{"role": "node-agent"}).Phase(corev1.PodRunning).Result()
nodeAgentPodRunning1 := builder.ForPod("fake-ns", "fake-pod-1").Labels(map[string]string{"role": "node-agent"}).Phase(corev1api.PodRunning).Result()
nodeAgentPodRunning2 := builder.ForPod("fake-ns", "fake-pod-2").Labels(map[string]string{"role": "node-agent"}).Phase(corev1api.PodRunning).Result()
nodeAgentPodRunning3 := builder.ForPod("fake-ns", "fake-pod-3").
Labels(map[string]string{"role": "node-agent"}).
Phase(corev1.PodRunning).
Phase(corev1api.PodRunning).
NodeName("fake-node").
Result()

@@ -185,11 +185,11 @@ func TestIsRunningInNode(t *testing.T) {
}

func TestGetPodSpec(t *testing.T) {
podSpec := corev1.PodSpec{
podSpec := corev1api.PodSpec{
NodeName: "fake-node",
}

daemonSet := &appsv1.DaemonSet{
daemonSet := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -197,8 +197,8 @@ func TestGetPodSpec(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
Spec: podSpec,
},
},

@@ -209,7 +209,7 @@ func TestGetPodSpec(t *testing.T) {
kubeClientObj []runtime.Object
namespace string
expectErr string
expectSpec corev1.PodSpec
expectSpec corev1api.PodSpec
}{
{
name: "ds is not found",

@@ -334,7 +334,7 @@ func TestGetConfigs(t *testing.T) {
}

func TestGetLabelValue(t *testing.T) {
daemonSet := &appsv1.DaemonSet{
daemonSet := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -344,7 +344,7 @@ func TestGetLabelValue(t *testing.T) {
},
}

daemonSetWithOtherLabel := &appsv1.DaemonSet{
daemonSetWithOtherLabel := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -352,8 +352,8 @@ func TestGetLabelValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-other-label": "fake-value-1",

@@ -363,7 +363,7 @@ func TestGetLabelValue(t *testing.T) {
},
}

daemonSetWithLabel := &appsv1.DaemonSet{
daemonSetWithLabel := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -371,8 +371,8 @@ func TestGetLabelValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-label": "fake-value-2",

@@ -382,7 +382,7 @@ func TestGetLabelValue(t *testing.T) {
},
}

daemonSetWithEmptyLabel := &appsv1.DaemonSet{
daemonSetWithEmptyLabel := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -390,8 +390,8 @@ func TestGetLabelValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"fake-label": "",

@@ -463,7 +463,7 @@ func TestGetLabelValue(t *testing.T) {
}

func TestGetAnnotationValue(t *testing.T) {
daemonSet := &appsv1.DaemonSet{
daemonSet := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -473,7 +473,7 @@ func TestGetAnnotationValue(t *testing.T) {
},
}

daemonSetWithOtherAnnotation := &appsv1.DaemonSet{
daemonSetWithOtherAnnotation := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -481,8 +481,8 @@ func TestGetAnnotationValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"fake-other-annotation": "fake-value-1",

@@ -492,7 +492,7 @@ func TestGetAnnotationValue(t *testing.T) {
},
}

daemonSetWithAnnotation := &appsv1.DaemonSet{
daemonSetWithAnnotation := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -500,8 +500,8 @@ func TestGetAnnotationValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"fake-annotation": "fake-value-2",

@@ -511,7 +511,7 @@ func TestGetAnnotationValue(t *testing.T) {
},
}

daemonSetWithEmptyAnnotation := &appsv1.DaemonSet{
daemonSetWithEmptyAnnotation := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",

@@ -519,8 +519,8 @@ func TestGetAnnotationValue(t *testing.T) {
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: appsv1api.DaemonSetSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"fake-annotation": "",

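The `AddToScheme` rename in `TestIsRunningInNode` is worth a note: registering the aliased API group on the scheme is what lets the fake controller-runtime client serve those kinds. A sketch of that wiring (the helper name is invented, and error handling is added here, whereas the test discards the `AddToScheme` results):

package demo

import (
	appsv1api "k8s.io/api/apps/v1"
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClient registers the aliased API groups on a fresh scheme and
// builds a fake client that can get and list those kinds.
func newFakeClient(objs ...client.Object) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := corev1api.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := appsv1api.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), nil
}
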
@@ -21,7 +21,7 @@ import (
"fmt"

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

@@ -30,7 +30,7 @@ func PluginConfigLabelSelector(kind PluginKind, name string) string {
return fmt.Sprintf("velero.io/plugin-config,%s=%s", name, kind)
}

func GetPluginConfig(kind PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1.ConfigMap, error) {
func GetPluginConfig(kind PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1api.ConfigMap, error) {
opts := metav1.ListOptions{
// velero.io/plugin-config: true
// velero.io/pod-volume-restore: RestoreItemAction
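
For context, the selector built by `PluginConfigLabelSelector` matches ConfigMaps carrying the `velero.io/plugin-config` label plus a `<plugin-name>=<kind>` pair, as the comment above illustrates. A hypothetical caller-side sketch of the same list call with a plain typed client (function name and namespace handling are placeholders):

package demo

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listPluginConfigs lists ConfigMaps matching the plugin-config selector,
// here for the pod-volume-restore RestoreItemAction shown above.
func listPluginConfigs(ctx context.Context, kc kubernetes.Interface, ns string) ([]string, error) {
	opts := metav1.ListOptions{
		LabelSelector: "velero.io/plugin-config,velero.io/pod-volume-restore=RestoreItemAction",
	}
	cms, err := kc.CoreV1().ConfigMaps(ns).List(ctx, opts)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(cms.Items))
	for _, cm := range cms.Items {
		names = append(names, cm.Name)
	}
	return names, nil
}
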
@@ -20,7 +20,7 @@ import (
"reflect"
"testing"

corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"

@@ -35,7 +35,7 @@ func TestGetPluginConfig(t *testing.T) {
objects []runtime.Object
}
pluginLabelsMap := map[string]string{"velero.io/plugin-config": "", "foo": "RestoreItemAction"}
testConfigMap := &corev1.ConfigMap{
testConfigMap := &corev1api.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
},

@@ -48,7 +48,7 @@ func TestGetPluginConfig(t *testing.T) {
tests := []struct {
name string
args args
want *corev1.ConfigMap
want *corev1api.ConfigMap
wantErr bool
}{
{

@@ -67,7 +67,7 @@ func TestGetPluginConfig(t *testing.T) {
kind: PluginKindRestoreItemAction,
name: "foo",
objects: []runtime.Object{
&corev1.ConfigMap{
&corev1api.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
},

@@ -77,7 +77,7 @@ func TestGetPluginConfig(t *testing.T) {
Labels: pluginLabelsMap,
},
},
&corev1.ConfigMap{
&corev1api.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
},

@@ -26,7 +26,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appv1 "k8s.io/api/apps/v1"
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -121,8 +121,8 @@ func TestGetVolumesRepositoryType(t *testing.T) {
}
}

func createNodeAgentDaemonset() *appv1.DaemonSet {
ds := &appv1.DaemonSet{
func createNodeAgentDaemonset() *appsv1api.DaemonSet {
ds := &appsv1api.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "node-agent",
Namespace: velerov1api.DefaultNamespace,

@@ -28,7 +28,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

@@ -39,7 +39,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"

appsv1 "k8s.io/api/apps/v1"
appsv1api "k8s.io/api/apps/v1"

veleroutil "github.com/vmware-tanzu/velero/pkg/util/velero"

@@ -143,7 +143,7 @@ func waitForJobComplete(ctx context.Context, client client.Client, ns string, jo

func getResultFromJob(cli client.Client, job *batchv1.Job) (string, error) {
// Get the maintenance job related pod by label selector
podList := &v1.PodList{}
podList := &corev1api.PodList{}
err := cli.List(context.TODO(), podList, client.InNamespace(job.Namespace), client.MatchingLabels(map[string]string{"job-name": job.Name}))
if err != nil {
return "", err

@@ -202,7 +202,7 @@ func getJobConfig(
repoMaintenanceJobConfig string,
repo *velerov1api.BackupRepository,
) (*JobConfigs, error) {
var cm v1.ConfigMap
var cm corev1api.ConfigMap
if err := client.Get(
ctx,
types.NamespacedName{

@@ -410,7 +410,7 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu
func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs,
podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1.Job, error) {
// Get the Velero server deployment
deployment := &appsv1.Deployment{}
deployment := &appsv1api.Deployment{}
err := cli.Get(ctx, types.NamespacedName{Name: "velero", Namespace: repo.Namespace}, deployment)
if err != nil {
return nil, err

@@ -486,14 +486,14 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
},
Spec: batchv1.JobSpec{
BackoffLimit: new(int32), // Never retry
Template: v1.PodTemplateSpec{
Template: corev1api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: "velero-repo-maintenance-pod",
Labels: podLabels,
Annotations: podAnnotations,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
Spec: corev1api.PodSpec{
Containers: []corev1api.Container{
{
Name: "velero-repo-maintenance-container",
Image: image,

@@ -501,18 +501,18 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
"/velero",
},
Args: args,
ImagePullPolicy: v1.PullIfNotPresent,
ImagePullPolicy: corev1api.PullIfNotPresent,
Env: envVars,
EnvFrom: envFromSources,
VolumeMounts: volumeMounts,
Resources: resources,
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
TerminationMessagePolicy: corev1api.TerminationMessageFallbackToLogsOnError,
},
},
RestartPolicy: v1.RestartPolicyNever,
RestartPolicy: corev1api.RestartPolicyNever,
Volumes: volumes,
ServiceAccountName: serviceAccount,
Tolerations: []v1.Toleration{
Tolerations: []corev1api.Toleration{
{
Key: "os",
Operator: "Equal",
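
The `buildJob` hunks mix three aliased groups in one object: `batchv1` for the Job, `corev1api` for the pod template, and `metav1` for metadata. A stripped-down sketch of that shape (the function name, object name, and image are placeholders, not this commit's values; real code also wires env, volumes, and resources):

package demo

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// maintenanceJobSkeleton shows the Job/PodTemplateSpec nesting with the
// canonical aliases.
func maintenanceJobSkeleton(ns string) *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "repo-maintenance", Namespace: ns},
		Spec: batchv1.JobSpec{
			BackoffLimit: new(int32), // zero value: never retry
			Template: corev1api.PodTemplateSpec{
				Spec: corev1api.PodSpec{
					RestartPolicy: corev1api.RestartPolicyNever,
					Containers: []corev1api.Container{{
						Name:            "maintenance",
						Image:           "example/velero:placeholder",
						ImagePullPolicy: corev1api.PullIfNotPresent,
					}},
				},
			},
		},
	}
}
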
@@ -28,7 +28,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -43,7 +43,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"

appsv1 "k8s.io/api/apps/v1"
appsv1api "k8s.io/api/apps/v1"
)

func TestGenerateJobName1(t *testing.T) {

@@ -275,7 +275,7 @@ func TestGetResultFromJob(t *testing.T) {
}

// Set up test pod with no status
pod := &v1.Pod{
pod := &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",

@@ -299,10 +299,10 @@ func TestGetResultFromJob(t *testing.T) {
assert.Equal(t, "", result)

// Set a non-terminated container status to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
pod.Status = corev1api.PodStatus{
ContainerStatuses: []corev1api.ContainerStatus{
{
State: v1.ContainerState{},
State: corev1api.ContainerState{},
},
},
}

@@ -314,11 +314,11 @@ func TestGetResultFromJob(t *testing.T) {
assert.Equal(t, "", result)

// Set a terminated container status to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
pod.Status = corev1api.PodStatus{
ContainerStatuses: []corev1api.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{},
},
},
},

@@ -331,11 +331,11 @@ func TestGetResultFromJob(t *testing.T) {
assert.Equal(t, "", result)

// Set a terminated container status with invalidate message to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
pod.Status = corev1api.PodStatus{
ContainerStatuses: []corev1api.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{
Message: "fake-message",
},
},

@@ -349,11 +349,11 @@ func TestGetResultFromJob(t *testing.T) {
assert.Equal(t, "", result)

// Set a terminated container status with empty maintenance error to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
pod.Status = corev1api.PodStatus{
ContainerStatuses: []corev1api.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{
Message: "Repo maintenance error: ",
},
},

@@ -367,11 +367,11 @@ func TestGetResultFromJob(t *testing.T) {
assert.Equal(t, "", result)

// Set a terminated container status with maintenance error to the pod
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
pod.Status = corev1api.PodStatus{
ContainerStatuses: []corev1api.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{
Message: "Repo maintenance error: fake-error",
},
},

@@ -404,7 +404,7 @@ func TestGetJobConfig(t *testing.T) {

testCases := []struct {
name string
repoJobConfig *v1.ConfigMap
repoJobConfig *corev1api.ConfigMap
expectedConfig *JobConfigs
expectedError error
}{

@@ -415,7 +415,7 @@ func TestGetJobConfig(t *testing.T) {
},
{
name: "Invalid JSON",
repoJobConfig: &v1.ConfigMap{
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,

@@ -429,7 +429,7 @@ func TestGetJobConfig(t *testing.T) {
},
{
name: "Find config specific for BackupRepository",
repoJobConfig: &v1.ConfigMap{
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,

@@ -463,7 +463,7 @@ func TestGetJobConfig(t *testing.T) {
},
{
name: "Find config specific for global",
repoJobConfig: &v1.ConfigMap{
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,

@@ -497,7 +497,7 @@ func TestGetJobConfig(t *testing.T) {
},
{
name: "Specific config supersede global config",
repoJobConfig: &v1.ConfigMap{
repoJobConfig: &corev1api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: veleroNamespace,
Name: repoMaintenanceJobConfig,

@@ -610,9 +610,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
},
}

jobPodSucceeded1 := builder.ForPod(veleroNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&v1.ContainerStatus{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
jobPodSucceeded1 := builder.ForPod(veleroNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&corev1api.ContainerStatus{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{},
},
}).Result()

@@ -629,9 +629,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
},
}

jobPodFailed1 := builder.ForPod(veleroNamespace, "job2").Labels(map[string]string{"job-name": "job2"}).ContainerStatuses(&v1.ContainerStatus{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
jobPodFailed1 := builder.ForPod(veleroNamespace, "job2").Labels(map[string]string{"job-name": "job2"}).ContainerStatuses(&corev1api.ContainerStatus{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{
Message: "Repo maintenance error: fake-message-2",
},
},

@@ -651,9 +651,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
},
}

jobPodSucceeded2 := builder.ForPod(veleroNamespace, "job3").Labels(map[string]string{"job-name": "job3"}).ContainerStatuses(&v1.ContainerStatus{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
jobPodSucceeded2 := builder.ForPod(veleroNamespace, "job3").Labels(map[string]string{"job-name": "job3"}).ContainerStatuses(&corev1api.ContainerStatus{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{},
},
}).Result()

@@ -671,9 +671,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
},
}

jobPodSucceeded3 := builder.ForPod(veleroNamespace, "job4").Labels(map[string]string{"job-name": "job4"}).ContainerStatuses(&v1.ContainerStatus{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
jobPodSucceeded3 := builder.ForPod(veleroNamespace, "job4").Labels(map[string]string{"job-name": "job4"}).ContainerStatuses(&corev1api.ContainerStatus{
State: corev1api.ContainerState{
Terminated: &corev1api.ContainerStateTerminated{},
},
}).Result()

@@ -681,7 +681,7 @@ func TestWaitAllJobsComplete(t *testing.T) {

scheme := runtime.NewScheme()
batchv1.AddToScheme(scheme)
v1.AddToScheme(scheme)
corev1api.AddToScheme(scheme)

testCases := []struct {
name string

@@ -867,35 +867,35 @@ func TestWaitAllJobsComplete(t *testing.T) {
}

func TestBuildJob(t *testing.T) {
deploy := appsv1.Deployment{
deploy := appsv1api.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
Namespace: "velero",
},
Spec: appsv1.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
Spec: appsv1api.DeploymentSpec{
Template: corev1api.PodTemplateSpec{
Spec: corev1api.PodSpec{
Containers: []corev1api.Container{
{
Name: "velero-repo-maintenance-container",
Image: "velero-image",
Env: []v1.EnvVar{
Env: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
EnvFrom: []v1.EnvFromSource{
EnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},

@@ -914,14 +914,14 @@ func TestBuildJob(t *testing.T) {
testCases := []struct {
name string
m *JobConfigs
deploy *appsv1.Deployment
deploy *appsv1api.Deployment
logLevel logrus.Level
logFormat *logging.FormatFlag
thirdPartyLabel map[string]string
expectedJobName string
expectedError bool
expectedEnv []v1.EnvVar
expectedEnvFrom []v1.EnvFromSource
expectedEnv []corev1api.EnvVar
expectedEnvFrom []corev1api.EnvFromSource
expectedPodLabel map[string]string
}{
{

@@ -939,23 +939,23 @@ func TestBuildJob(t *testing.T) {
logFormat: logging.NewFormatFlag(),
expectedJobName: "test-123-maintain-job",
expectedError: false,
expectedEnv: []v1.EnvVar{
expectedEnv: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
expectedEnvFrom: []v1.EnvFromSource{
expectedEnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},

@@ -980,23 +980,23 @@ func TestBuildJob(t *testing.T) {
logFormat: logging.NewFormatFlag(),
expectedJobName: "test-123-maintain-job",
expectedError: false,
expectedEnv: []v1.EnvVar{
expectedEnv: []corev1api.EnvVar{
{
Name: "test-name",
Value: "test-value",
},
},
expectedEnvFrom: []v1.EnvFromSource{
expectedEnvFrom: []corev1api.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
ConfigMapRef: &corev1api.ConfigMapEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-configmap",
},
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
SecretRef: &corev1api.SecretEnvSource{
LocalObjectReference: corev1api.LocalObjectReference{
Name: "test-secret",
},
},

@@ -1052,7 +1052,7 @@ func TestBuildJob(t *testing.T) {
objs = append(objs, tc.deploy)
}
scheme := runtime.NewScheme()
_ = appsv1.AddToScheme(scheme)
_ = appsv1api.AddToScheme(scheme)
_ = velerov1api.AddToScheme(scheme)
cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build()

@@ -1077,21 +1077,21 @@ func TestBuildJob(t *testing.T) {
container := job.Spec.Template.Spec.Containers[0]
assert.Equal(t, "velero-repo-maintenance-container", container.Name)
assert.Equal(t, "velero-image", container.Image)
assert.Equal(t, v1.PullIfNotPresent, container.ImagePullPolicy)
assert.Equal(t, corev1api.PullIfNotPresent, container.ImagePullPolicy)

// Check container env
assert.Equal(t, tc.expectedEnv, container.Env)
assert.Equal(t, tc.expectedEnvFrom, container.EnvFrom)

// Check resources
expectedResources := v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(tc.m.PodResources.CPURequest),
v1.ResourceMemory: resource.MustParse(tc.m.PodResources.MemoryRequest),
expectedResources := corev1api.ResourceRequirements{
Requests: corev1api.ResourceList{
corev1api.ResourceCPU: resource.MustParse(tc.m.PodResources.CPURequest),
corev1api.ResourceMemory: resource.MustParse(tc.m.PodResources.MemoryRequest),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(tc.m.PodResources.CPULimit),
v1.ResourceMemory: resource.MustParse(tc.m.PodResources.MemoryLimit),
Limits: corev1api.ResourceList{
corev1api.ResourceCPU: resource.MustParse(tc.m.PodResources.CPULimit),
corev1api.ResourceMemory: resource.MustParse(tc.m.PodResources.MemoryLimit),
},
}
assert.Equal(t, expectedResources, container.Resources)

@@ -29,7 +29,7 @@ import (
"github.com/stretchr/testify/require"

corev1api "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

velerocredentials "github.com/vmware-tanzu/velero/internal/credentials"
credmock "github.com/vmware-tanzu/velero/internal/credentials/mocks"

@@ -602,7 +602,7 @@ func TestGetStoreOptions(t *testing.T) {

func TestPrepareRepo(t *testing.T) {
bsl := velerov1api.BackupStorageLocation{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-bsl",
Namespace: velerov1api.DefaultNamespace,
},

@@ -1095,7 +1095,7 @@ func TestBatchForget(t *testing.T) {

func TestInitRepo(t *testing.T) {
bsl := velerov1api.BackupStorageLocation{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-bsl",
Namespace: velerov1api.DefaultNamespace,
},

@@ -21,7 +21,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -34,50 +34,50 @@ import (
func TestAddPVCFromPodActionExecute(t *testing.T) {
tests := []struct {
name string
item *v1.Pod
item *corev1api.Pod
want []velero.ResourceIdentifier
}{
{
name: "pod with no volumes returns no additional items",
item: &v1.Pod{},
item: &corev1api.Pod{},
want: nil,
},
{
name: "pod with some PVCs returns them as additional items",
item: &v1.Pod{
item: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns-1",
Name: "foo",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{
VolumeSource: v1.VolumeSource{
EmptyDir: new(v1.EmptyDirVolumeSource),
VolumeSource: corev1api.VolumeSource{
EmptyDir: new(corev1api.EmptyDirVolumeSource),
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-2",
},
},
},
{
VolumeSource: v1.VolumeSource{
HostPath: new(v1.HostPathVolumeSource),
VolumeSource: corev1api.VolumeSource{
HostPath: new(corev1api.HostPathVolumeSource),
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-3",
},
},

@@ -23,7 +23,7 @@ import (

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"

@@ -144,7 +144,7 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn
return velero.NewRestoreItemActionExecuteOutput(obj), nil
}

func (a *ChangeImageNameAction) replaceImageName(obj *unstructured.Unstructured, config *corev1.ConfigMap, filed ...string) error {
func (a *ChangeImageNameAction) replaceImageName(obj *unstructured.Unstructured, config *corev1api.ConfigMap, filed ...string) error {
log := a.logger.WithFields(map[string]any{
"kind": obj.GetKind(),
"namespace": obj.GetNamespace(),

@@ -179,7 +179,7 @@ func (a *ChangeImageNameAction) replaceImageName(obj *unstructured.Unstructured,
return nil
}

func (a *ChangeImageNameAction) isImageReplaceRuleExist(log *logrus.Entry, oldImageName string, cm *corev1.ConfigMap) (exists bool, newImageName string, err error) {
func (a *ChangeImageNameAction) isImageReplaceRuleExist(log *logrus.Entry, oldImageName string, cm *corev1api.ConfigMap) (exists bool, newImageName string, err error) {
if oldImageName == "" {
log.Infoln("Item has no old image name specified")
return false, "", nil

@@ -23,7 +23,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -41,7 +41,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
tests := []struct {
name string
podOrObj any
configMap *corev1.ConfigMap
configMap *corev1api.ConfigMap
freshedImageName string
imageNameSlice []string
want any

@@ -50,7 +50,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "a valid mapping with spaces for a new image repository is applied correctly",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container1",
Image: "1.1.1.1:5000/abc:test",
}).Result(),

@@ -65,7 +65,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "a valid mapping for a new image repository is applied correctly",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container2",
Image: "1.1.1.1:5000/abc:test",
}).Result(),

@@ -80,7 +80,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "a valid mapping for a new image name is applied correctly",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container3",
Image: "1.1.1.1:5000/abc:test",
}).Result(),

@@ -95,7 +95,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "a valid mapping for a new image repository port is applied correctly",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container4",
Image: "1.1.1.1:5000/abc:test",
}).Result(),

@@ -110,7 +110,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "a valid mapping for a new image tag is applied correctly",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container5",
Image: "1.1.1.1:5000/abc:test",
}).Result(),

@@ -125,7 +125,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
{
name: "image name contains more than one part that matching the replacing words.",
podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
Containers(&corev1.Container{
Containers(&corev1api.Container{
Name: "container6",
Image: "dev/image1:dev",
}).Result(),

@@ -170,7 +170,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
assert.EqualError(t, err, tc.wantErr.Error())
default:
assert.NoError(t, err)
pod := new(corev1.Pod)
pod := new(corev1api.Pod)
err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UpdatedItem.UnstructuredContent(), pod)
require.NoError(t, err)
assert.Equal(t, tc.want, pod.Spec.Containers[0].Image)

@@ -21,8 +21,8 @@ import (

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
appsv1api "k8s.io/api/apps/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -92,7 +92,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut

// change StatefulSet volumeClaimTemplates storageClassName
if obj.GetKind() == "StatefulSet" {
sts := new(appsv1.StatefulSet)
sts := new(appsv1api.StatefulSet)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), sts); err != nil {
return nil, err
}

@@ -140,7 +140,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
return velero.NewRestoreItemActionExecuteOutput(obj), nil
}

func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass *string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) {
func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass *string, cm *corev1api.ConfigMap) (exists bool, newStorageClass string, err error) {
if storageClass == nil || *storageClass == "" {
log.Debug("Item has no storage class specified")
return false, "", nil

@@ -19,7 +19,7 @@ package actions
import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
rbac "k8s.io/api/rbac/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -47,7 +47,7 @@ func (a *ClusterRoleBindingAction) Execute(input *velero.RestoreItemActionExecut
return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil
}

clusterRoleBinding := new(rbac.ClusterRoleBinding)
clusterRoleBinding := new(rbacv1.ClusterRoleBinding)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), clusterRoleBinding); err != nil {
return nil, errors.WithStack(err)
}
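
The unstructured-to-typed conversion idiom above recurs throughout these restore actions. A self-contained sketch of the round trip under the `rbacv1` alias (both function names are invented for illustration):

package demo

import (
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// toTyped converts unstructured content into a typed ClusterRoleBinding.
func toTyped(content map[string]any) (*rbacv1.ClusterRoleBinding, error) {
	crb := new(rbacv1.ClusterRoleBinding)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(content, crb); err != nil {
		return nil, err
	}
	return crb, nil
}

// toUnstructured converts the typed object back for the restore pipeline.
func toUnstructured(crb *rbacv1.ClusterRoleBinding) (map[string]any, error) {
	return runtime.DefaultUnstructuredConverter.ToUnstructured(crb)
}
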
@@ -22,7 +22,7 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
rbac "k8s.io/api/rbac/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -73,15 +73,15 @@ func TestClusterRoleBindingActionExecute(t *testing.T) {

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
subjects := []rbac.Subject{}
subjects := []rbacv1.Subject{}

for _, ns := range tc.namespaces {
subjects = append(subjects, rbac.Subject{
subjects = append(subjects, rbacv1.Subject{
Namespace: ns,
})
}

clusterRoleBinding := rbac.ClusterRoleBinding{
clusterRoleBinding := rbacv1.ClusterRoleBinding{
Subjects: subjects,
}

@@ -100,7 +100,7 @@ func TestClusterRoleBindingActionExecute(t *testing.T) {
})
require.NoError(t, err)

var resClusterRoleBinding *rbac.ClusterRoleBinding
var resClusterRoleBinding *rbacv1.ClusterRoleBinding
err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UpdatedItem.UnstructuredContent(), &resClusterRoleBinding)
require.NoError(t, err)

@@ -21,7 +21,7 @@ import (
"testing"

"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"

@@ -39,13 +39,13 @@ import (
func TestDataUploadRetrieveActionExectue(t *testing.T) {
scheme := runtime.NewScheme()
velerov1.AddToScheme(scheme)
corev1.AddToScheme(scheme)
corev1api.AddToScheme(scheme)

tests := []struct {
name string
dataUpload *velerov2alpha1.DataUpload
restore *velerov1.Restore
expectedDataUploadResult *corev1.ConfigMap
expectedDataUploadResult *corev1api.ConfigMap
expectedErr string
runtimeScheme *runtime.Scheme
veleroObjs []runtime.Object

@@ -110,7 +110,7 @@ func TestDataUploadRetrieveActionExectue(t *testing.T) {
}

if tc.expectedDataUploadResult != nil {
var cmList corev1.ConfigMapList
var cmList corev1api.ConfigMapList
err := fakeClient.List(context.Background(), &cmList, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
velerov1.RestoreUIDLabel: "testingUID",

@@ -21,7 +21,7 @@ import (

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

@@ -44,7 +44,7 @@ func (a *PodAction) AppliesTo() (velero.ResourceSelector, error) {
}

func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
pod := new(v1.Pod)
pod := new(corev1api.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), pod); err != nil {
return nil, errors.WithStack(err)
}

@@ -54,7 +54,7 @@ func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler

serviceAccountTokenPrefix := pod.Spec.ServiceAccountName + "-token-"

var preservedVolumes []v1.Volume
var preservedVolumes []corev1api.Volume
for _, vol := range pod.Spec.Volumes {
if !strings.HasPrefix(vol.Name, serviceAccountTokenPrefix) {
preservedVolumes = append(preservedVolumes, vol)

@@ -63,7 +63,7 @@ func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
pod.Spec.Volumes = preservedVolumes

for i, container := range pod.Spec.Containers {
var preservedVolumeMounts []v1.VolumeMount
var preservedVolumeMounts []corev1api.VolumeMount
for _, mount := range container.VolumeMounts {
if !strings.HasPrefix(mount.Name, serviceAccountTokenPrefix) {
preservedVolumeMounts = append(preservedVolumeMounts, mount)

@@ -73,7 +73,7 @@ func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler
}

for i, container := range pod.Spec.InitContainers {
var preservedVolumeMounts []v1.VolumeMount
var preservedVolumeMounts []corev1api.VolumeMount
for _, mount := range container.VolumeMounts {
if !strings.HasPrefix(mount.Name, serviceAccountTokenPrefix) {
preservedVolumeMounts = append(preservedVolumeMounts, mount)

@ -25,8 +25,8 @@ import (
|
|||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
appsv1api "k8s.io/api/apps/v1"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -60,7 +60,7 @@ type PodVolumeRestoreAction struct {
|
|||
}
|
||||
|
||||
func NewPodVolumeRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, crClient ctrlclient.Client, namespace string) (*PodVolumeRestoreAction, error) {
|
||||
deployment := &appsv1.Deployment{}
|
||||
deployment := &appsv1api.Deployment{}
|
||||
if err := crClient.Get(context.TODO(), types.NamespacedName{Name: "velero", Namespace: namespace}, deployment); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
|
|||
a.logger.Info("Executing PodVolumeRestoreAction")
|
||||
defer a.logger.Info("Done executing PodVolumeRestoreAction")
|
||||
|
||||
var pod corev1.Pod
|
||||
var pod corev1api.Pod
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), &pod); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to convert pod from runtime.Unstructured")
|
||||
}
|
||||
|
@@ -92,7 +92,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 	// has not yet been applied to `input.Item` so we can't perform a reverse-lookup in
 	// the namespace mapping in the restore spec. Instead, use the pod from the backup
 	// so that if the mapping is applied earlier, we still use the correct namespace.
-	var podFromBackup corev1.Pod
+	var podFromBackup corev1api.Pod
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.ItemFromBackup.UnstructuredContent(), &podFromBackup); err != nil {
 		return nil, errors.Wrap(err, "unable to convert source pod from runtime.Unstructured")
 	}

@@ -156,7 +156,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 	runAsUser, runAsGroup, allowPrivilegeEscalation, secCtx := getSecurityContext(log, config)

-	var securityContext corev1.SecurityContext
+	var securityContext corev1api.SecurityContext
 	securityContextSet := false
 	// Use securityContext settings from configmap if available
 	if runAsUser != "" || runAsGroup != "" || allowPrivilegeEscalation != "" || secCtx != "" {

@@ -181,7 +181,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 	initContainerBuilder.SecurityContext(&securityContext)

 	for volumeName := range volumeSnapshots {
-		mount := &corev1.VolumeMount{
+		mount := &corev1api.VolumeMount{
 			Name:      volumeName,
 			MountPath: "/restores/" + volumeName,
 		}

@@ -191,7 +191,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 	initContainer := *initContainerBuilder.Result()
 	if len(pod.Spec.InitContainers) == 0 || (pod.Spec.InitContainers[0].Name != restorehelper.WaitInitContainer && pod.Spec.InitContainers[0].Name != restorehelper.WaitInitContainerLegacy) {
-		pod.Spec.InitContainers = append([]corev1.Container{initContainer}, pod.Spec.InitContainers...)
+		pod.Spec.InitContainers = append([]corev1api.Container{initContainer}, pod.Spec.InitContainers...)
 	} else {
 		pod.Spec.InitContainers[0] = initContainer
 	}
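The hunk above keeps the restore-wait init container first in the pod spec: it is prepended when absent, and replaced in place when a current or legacy copy already sits at index 0. A standalone sketch of that insertion rule (the container names stand in for the restorehelper constants):

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// Illustrative stand-ins for restorehelper.WaitInitContainer / WaitInitContainerLegacy.
const (
	waitInitContainer       = "restore-wait"
	waitInitContainerLegacy = "restic-wait"
)

// ensureWaitContainerFirst makes the restore-wait container run before all
// other init containers, replacing an existing copy instead of duplicating it.
func ensureWaitContainerFirst(spec *corev1api.PodSpec, initContainer corev1api.Container) {
	if len(spec.InitContainers) == 0 ||
		(spec.InitContainers[0].Name != waitInitContainer && spec.InitContainers[0].Name != waitInitContainerLegacy) {
		spec.InitContainers = append([]corev1api.Container{initContainer}, spec.InitContainers...)
	} else {
		spec.InitContainers[0] = initContainer
	}
}

func main() {
	spec := &corev1api.PodSpec{
		InitContainers: []corev1api.Container{{Name: "migrate-db"}},
	}
	ensureWaitContainerFirst(spec, corev1api.Container{Name: waitInitContainer})
	fmt.Println(spec.InitContainers[0].Name, spec.InitContainers[1].Name) // restore-wait migrate-db
}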
@@ -204,7 +204,7 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI
 	return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil
 }

-func getCommand(log logrus.FieldLogger, config *corev1.ConfigMap) []string {
+func getCommand(log logrus.FieldLogger, config *corev1api.ConfigMap) []string {
 	if config == nil {
 		log.Debug("No config found for plugin")
 		return []string{defaultCommand}

@@ -219,7 +219,7 @@ func getCommand(log logrus.FieldLogger, config *corev1.ConfigMap) []string {
 	return []string{config.Data["command"]}
 }

-func getImage(log logrus.FieldLogger, config *corev1.ConfigMap, defaultImage string) string {
+func getImage(log logrus.FieldLogger, config *corev1api.ConfigMap, defaultImage string) string {
 	if config == nil {
 		log.Debug("No config found for plugin")
 		return defaultImage

@@ -254,7 +254,7 @@ func getImage(log logrus.FieldLogger, config *corev1.ConfigMap, defaultImage str
 // getResourceRequests extracts the CPU and memory requests from a ConfigMap.
 // The 0 values are valid if the keys are not present
-func getResourceRequests(log logrus.FieldLogger, config *corev1.ConfigMap) (string, string) {
+func getResourceRequests(log logrus.FieldLogger, config *corev1api.ConfigMap) (string, string) {
 	if config == nil {
 		log.Debug("No config found for plugin")
 		return "", ""

@@ -265,7 +265,7 @@ func getResourceRequests(log logrus.FieldLogger, config *corev1.ConfigMap) (stri
 // getResourceLimits extracts the CPU and memory limits from a ConfigMap.
 // The 0 values are valid if the keys are not present
-func getResourceLimits(log logrus.FieldLogger, config *corev1.ConfigMap) (string, string) {
+func getResourceLimits(log logrus.FieldLogger, config *corev1api.ConfigMap) (string, string) {
 	if config == nil {
 		log.Debug("No config found for plugin")
 		return "", ""

@@ -275,7 +275,7 @@ func getResourceLimits(log logrus.FieldLogger, config *corev1.ConfigMap) (string
 }

 // getSecurityContext extracts securityContext runAsUser, runAsGroup, allowPrivilegeEscalation, and securityContext from a ConfigMap.
-func getSecurityContext(log logrus.FieldLogger, config *corev1.ConfigMap) (string, string, string, string) {
+func getSecurityContext(log logrus.FieldLogger, config *corev1api.ConfigMap) (string, string, string, string) {
 	if config == nil {
 		log.Debug("No config found for plugin")
 		return "", "", "", ""
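getCommand, getImage, getResourceRequests, getResourceLimits, and getSecurityContext all share one shape: a nil ConfigMap means use the default, otherwise a key is read from cm.Data. A generic sketch of that lookup (the configValue helper is hypothetical, not part of the plugin):

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// configValue is a hypothetical helper showing the shared pattern: fall back
// to a default when the config map is absent or the key is missing.
func configValue(config *corev1api.ConfigMap, key, defaultValue string) string {
	if config == nil {
		return defaultValue
	}
	if v, ok := config.Data[key]; ok && v != "" {
		return v
	}
	return defaultValue
}

func main() {
	cm := &corev1api.ConfigMap{Data: map[string]string{"image": "velero/velero-restore-helper:v1.12"}}
	fmt.Println(configValue(cm, "image", "default-image"))  // value from the config map
	fmt.Println(configValue(nil, "image", "default-image")) // default when no config exists
	fmt.Println(configValue(cm, "cpuRequest", ""))          // empty: key not present
}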
@@ -290,19 +290,19 @@ func getSecurityContext(log logrus.FieldLogger, config *corev1.ConfigMap) (strin
 func newRestoreInitContainerBuilder(image, restoreUID string) *builder.ContainerBuilder {
 	return builder.ForContainer(restorehelper.WaitInitContainer, image).
 		Args(restoreUID).
-		Env([]*corev1.EnvVar{
+		Env([]*corev1api.EnvVar{
 			{
 				Name: "POD_NAMESPACE",
-				ValueFrom: &corev1.EnvVarSource{
-					FieldRef: &corev1.ObjectFieldSelector{
+				ValueFrom: &corev1api.EnvVarSource{
+					FieldRef: &corev1api.ObjectFieldSelector{
 						FieldPath: "metadata.namespace",
 					},
 				},
 			},
 			{
 				Name: "POD_NAME",
-				ValueFrom: &corev1.EnvVarSource{
-					FieldRef: &corev1.ObjectFieldSelector{
+				ValueFrom: &corev1api.EnvVarSource{
+					FieldRef: &corev1api.ObjectFieldSelector{
 						FieldPath: "metadata.name",
 					},
 				},
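The builder above injects POD_NAMESPACE and POD_NAME via the Downward API so the restore helper can locate its own pod at runtime. The same structure expressed directly with the core types, without the builder:

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// downwardAPIEnv builds an env var whose value the kubelet resolves from the
// enclosing pod's metadata (e.g. "metadata.name", "metadata.namespace").
func downwardAPIEnv(name, fieldPath string) corev1api.EnvVar {
	return corev1api.EnvVar{
		Name: name,
		ValueFrom: &corev1api.EnvVarSource{
			FieldRef: &corev1api.ObjectFieldSelector{FieldPath: fieldPath},
		},
	}
}

func main() {
	env := []corev1api.EnvVar{
		downwardAPIEnv("POD_NAMESPACE", "metadata.namespace"),
		downwardAPIEnv("POD_NAME", "metadata.name"),
	}
	fmt.Println(env[0].Name, env[0].ValueFrom.FieldRef.FieldPath) // POD_NAMESPACE metadata.namespace
}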
@@ -312,15 +312,15 @@ func newRestoreInitContainerBuilder(image, restoreUID string) *builder.Container
 // defaultSecurityCtx returns a default security context for the init container, which has the level "restricted" per
 // Pod Security Standards.
-func defaultSecurityCtx() corev1.SecurityContext {
+func defaultSecurityCtx() corev1api.SecurityContext {
 	uid := int64(restoreHelperUID)
-	return corev1.SecurityContext{
+	return corev1api.SecurityContext{
 		AllowPrivilegeEscalation: boolptr.False(),
-		Capabilities: &corev1.Capabilities{
-			Drop: []corev1.Capability{"ALL"},
+		Capabilities: &corev1api.Capabilities{
+			Drop: []corev1api.Capability{"ALL"},
 		},
-		SeccompProfile: &corev1.SeccompProfile{
-			Type: corev1.SeccompProfileTypeRuntimeDefault,
+		SeccompProfile: &corev1api.SeccompProfile{
+			Type: corev1api.SeccompProfileTypeRuntimeDefault,
 		},
 		RunAsUser:    &uid,
 		RunAsNonRoot: boolptr.True(),

@@ -25,7 +25,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	appsv1 "k8s.io/api/apps/v1"
+	appsv1api "k8s.io/api/apps/v1"
 	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

@@ -300,16 +300,16 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) {
 		},
 	}

-	veleroDeployment := &appsv1.Deployment{
+	veleroDeployment := &appsv1api.Deployment{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: appsv1.SchemeGroupVersion.String(),
+			APIVersion: appsv1api.SchemeGroupVersion.String(),
 			Kind:       "Deployment",
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "velero",
 			Name:      "velero",
 		},
-		Spec: appsv1.DeploymentSpec{
+		Spec: appsv1api.DeploymentSpec{
 			Template: corev1api.PodTemplateSpec{
 				Spec: corev1api.PodSpec{
 					Containers: []corev1api.Container{

@@ -22,7 +22,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	corev1api "k8s.io/api/core/v1"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -196,7 +196,7 @@ func getNewNodeFromConfigMap(client corev1client.ConfigMapInterface, node string
 func isNodeExist(nodeClient corev1client.NodeInterface, name string) (bool, error) {
 	_, err := nodeClient.Get(context.TODO(), name, metav1.GetOptions{})
 	if err != nil {
-		if k8serrors.IsNotFound(err) {
+		if apierrors.IsNotFound(err) {
 			return false, nil
 		}
 		return false, err
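The k8serrors to apierrors rename is purely cosmetic; the existence check behaves the same. For reference, apierrors.IsNotFound separates "object absent" from "call failed"; a sketch against a fake clientset (the node name is illustrative):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// isNodeExist mirrors the function in the hunk above: a NotFound error means
// "no such node" rather than a failed call.
func isNodeExist(nodeClient corev1client.NodeInterface, name string) (bool, error) {
	_, err := nodeClient.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func main() {
	client := fake.NewSimpleClientset() // no nodes registered
	exists, err := isNodeExist(client.CoreV1().Nodes(), "worker-1")
	fmt.Println(exists, err) // false <nil>
}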
@@ -19,7 +19,7 @@ package actions
 import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	rbac "k8s.io/api/rbac/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -47,7 +47,7 @@ func (a *RoleBindingAction) Execute(input *velero.RestoreItemActionExecuteInput)
 		return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: input.Item.UnstructuredContent()}), nil
 	}

-	roleBinding := new(rbac.RoleBinding)
+	roleBinding := new(rbacv1.RoleBinding)
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), roleBinding); err != nil {
 		return nil, errors.WithStack(err)
 	}
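The action deserializes the RoleBinding before rewriting it, and the test in the next section builds subjects whose namespaces are exercised against the restore. A plausible sketch of that kind of subject rewrite (the remap helper and mapping are illustrative; the action's real logic lives in the plugin source):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

// remapSubjectNamespaces is an illustrative helper: rewrite each subject's
// namespace according to a restore namespace mapping, leaving unmapped
// namespaces untouched.
func remapSubjectNamespaces(subjects []rbacv1.Subject, mapping map[string]string) []rbacv1.Subject {
	for i, s := range subjects {
		if target, ok := mapping[s.Namespace]; ok {
			subjects[i].Namespace = target
		}
	}
	return subjects
}

func main() {
	rb := rbacv1.RoleBinding{
		Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: "app", Namespace: "staging"}},
	}
	rb.Subjects = remapSubjectNamespaces(rb.Subjects, map[string]string{"staging": "prod"})
	fmt.Println(rb.Subjects[0].Namespace) // prod
}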
@@ -22,7 +22,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	rbac "k8s.io/api/rbac/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -73,15 +73,15 @@ func TestRoleBindingActionExecute(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			subjects := []rbac.Subject{}
+			subjects := []rbacv1.Subject{}

 			for _, ns := range tc.namespaces {
-				subjects = append(subjects, rbac.Subject{
+				subjects = append(subjects, rbacv1.Subject{
 					Namespace: ns,
 				})
 			}

-			roleBinding := rbac.RoleBinding{
+			roleBinding := rbacv1.RoleBinding{
 				Subjects: subjects,
 			}

@@ -100,7 +100,7 @@ func TestRoleBindingActionExecute(t *testing.T) {
 			})
 			require.NoError(t, err)

-			var resRoleBinding *rbac.RoleBinding
+			var resRoleBinding *rbacv1.RoleBinding
 			err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UpdatedItem.UnstructuredContent(), &resRoleBinding)
 			require.NoError(t, err)

@@ -23,7 +23,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"

@@ -58,13 +58,13 @@ func (s *SecretAction) Execute(input *velero.RestoreItemActionExecuteInput) (*ve
 	s.logger.Info("Executing SecretAction")
 	defer s.logger.Info("Done executing SecretAction")

-	var secret corev1.Secret
+	var secret corev1api.Secret
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), &secret); err != nil {
 		return nil, errors.Wrap(err, "unable to convert secret from runtime.Unstructured")
 	}

 	log := s.logger.WithField("secret", kube.NamespaceAndName(&secret))
-	if secret.Type != corev1.SecretTypeServiceAccountToken {
+	if secret.Type != corev1api.SecretTypeServiceAccountToken {
 		log.Debug("No match found - including this secret")
 		return &velero.RestoreItemActionExecuteOutput{
 			UpdatedItem: input.Item,

@@ -74,7 +74,7 @@ func (s *SecretAction) Execute(input *velero.RestoreItemActionExecuteInput) (*ve
 	// The auto-created service account token secret will be recreated automatically by the kube controller (before Kubernetes v1.22), so there is no need to restore it.
 	// Restoring it would make the patch operation on managedFields fail, because the secret is removed immediately
 	// after restoration and the patch operation then reports a not-found error.
-	list := &corev1.ServiceAccountList{}
+	list := &corev1api.ServiceAccountList{}
 	if err := s.client.List(context.Background(), list, &client.ListOptions{Namespace: secret.Namespace}); err != nil {
 		return nil, errors.Wrap(err, "unable to list the service accounts")
 	}
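Two facts visible above drive this action: only secrets of type kubernetes.io/service-account-token are candidates for skipping, and the decision consults the service accounts in the secret's namespace. A condensed sketch of such a check (the name-prefix heuristic is an illustrative approximation, not necessarily the action's exact rule):

package main

import (
	"fmt"
	"strings"

	corev1api "k8s.io/api/core/v1"
)

// isAutoCreatedSAToken approximates the skip decision: a token secret named
// "<serviceaccount>-token-<suffix>" for an existing service account is the
// auto-generated kind that Kubernetes (pre-1.22) recreates on its own.
func isAutoCreatedSAToken(secret *corev1api.Secret, serviceAccounts []corev1api.ServiceAccount) bool {
	if secret.Type != corev1api.SecretTypeServiceAccountToken {
		return false
	}
	for _, sa := range serviceAccounts {
		if strings.HasPrefix(secret.Name, sa.Name+"-token-") {
			return true
		}
	}
	return false
}

func main() {
	secret := &corev1api.Secret{}
	secret.Name = "default-token-sfafa"
	secret.Type = corev1api.SecretTypeServiceAccountToken

	sas := make([]corev1api.ServiceAccount, 1)
	sas[0].Name = "default"

	fmt.Println(isAutoCreatedSAToken(secret, sas)) // true: skip restoring it
}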
@@ -21,7 +21,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -42,39 +42,39 @@ func TestSecretActionAppliesTo(t *testing.T) {
 func TestSecretActionExecute(t *testing.T) {
 	tests := []struct {
 		name           string
-		input          *corev1.Secret
-		serviceAccount *corev1.ServiceAccount
+		input          *corev1api.Secret
+		serviceAccount *corev1api.ServiceAccount
 		skipped        bool
-		output         *corev1.Secret
+		output         *corev1api.Secret
 	}{
 		{
 			name: "not service account token secret",
-			input: &corev1.Secret{
+			input: &corev1api.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "default-token-sfafa",
 				},
-				Type: corev1.SecretTypeOpaque,
+				Type: corev1api.SecretTypeOpaque,
 			},
 			skipped: false,
-			output: &corev1.Secret{
+			output: &corev1api.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "default-token-sfafa",
 				},
-				Type: corev1.SecretTypeOpaque,
+				Type: corev1api.SecretTypeOpaque,
 			},
 		},
 		{
 			name: "auto created service account token",
-			input: &corev1.Secret{
+			input: &corev1api.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "default-token-sfafa",
 				},
-				Type: corev1.SecretTypeServiceAccountToken,
+				Type: corev1api.SecretTypeServiceAccountToken,
 			},
-			serviceAccount: &corev1.ServiceAccount{
+			serviceAccount: &corev1api.ServiceAccount{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "default",

@@ -84,7 +84,7 @@ func TestSecretActionExecute(t *testing.T) {
 		},
 		{
 			name: "not auto created service account token",
-			input: &corev1.Secret{
+			input: &corev1api.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "my-token",

@@ -93,7 +93,7 @@ func TestSecretActionExecute(t *testing.T) {
 						"key": "value",
 					},
 				},
-				Type: corev1.SecretTypeServiceAccountToken,
+				Type: corev1api.SecretTypeServiceAccountToken,
 				Data: map[string][]byte{
 					"token":  []byte("token"),
 					"ca.crt": []byte("ca"),

@@ -101,7 +101,7 @@ func TestSecretActionExecute(t *testing.T) {
 				},
 			},
 			skipped: false,
-			output: &corev1.Secret{
+			output: &corev1api.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "my-token",

@@ -109,7 +109,7 @@ func TestSecretActionExecute(t *testing.T) {
 						"key": "value",
 					},
 				},
-				Type: corev1.SecretTypeServiceAccountToken,
+				Type: corev1api.SecretTypeServiceAccountToken,
 				Data: map[string][]byte{
 					"key": []byte("value"),
 				},

@@ -21,7 +21,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -47,7 +47,7 @@ func (a *ServiceAccountAction) Execute(input *velero.RestoreItemActionExecuteInp
 	a.logger.Info("Executing ServiceAccountAction")
 	defer a.logger.Info("Done executing ServiceAccountAction")

-	var serviceAccount corev1.ServiceAccount
+	var serviceAccount corev1api.ServiceAccount
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), &serviceAccount); err != nil {
 		return nil, errors.Wrap(err, "unable to convert serviceaccount from runtime.Unstructured")
 	}

@@ -22,7 +22,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -73,7 +73,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			sa := corev1.ServiceAccount{
+			sa := corev1api.ServiceAccount{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "foo",
 					Name:      "bar",

@@ -81,7 +81,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
 			}

 			for _, secret := range tc.secrets {
-				sa.Secrets = append(sa.Secrets, corev1.ObjectReference{
+				sa.Secrets = append(sa.Secrets, corev1api.ObjectReference{
 					Name: secret,
 				})
 			}

@@ -97,7 +97,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
 			})
 			require.NoError(t, err)

-			var resSA *corev1.ServiceAccount
+			var resSA *corev1api.ServiceAccount
 			err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.UpdatedItem.UnstructuredContent(), &resSA)
 			require.NoError(t, err)

@@ -22,7 +22,7 @@ import (
 	"strings"

 	"github.com/pkg/errors"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/version"

@@ -193,7 +193,7 @@ func k8sPrioritySort(gvs []metav1.GroupVersionForDiscovery) {
 // userResourceGroupVersionPriorities retrieves a user-provided config map and
 // extracts the user priority versions for each resource.
-func userResourceGroupVersionPriorities(ctx *restoreContext, cm *corev1.ConfigMap) map[string]metav1.APIGroup {
+func userResourceGroupVersionPriorities(ctx *restoreContext, cm *corev1api.ConfigMap) map[string]metav1.APIGroup {
 	if cm == nil {
 		ctx.log.Debugf("No enableapigroupversion config map found in velero namespace. Using pre-defined priorities.")
 		return nil

@@ -208,7 +208,7 @@ func userResourceGroupVersionPriorities(ctx *restoreContext, cm *corev1.ConfigMa
 	return priorities
 }

-func userPriorityConfigMap() (*corev1.ConfigMap, error) {
+func userPriorityConfigMap() (*corev1api.ConfigMap, error) {
 	cfg, err := client.LoadConfig()
 	if err != nil {
 		return nil, errors.Wrap(err, "reading client config file")
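userPriorityConfigMap loads the user's enableapigroupversion config map, and userResourceGroupVersionPriorities turns its data into per-group version priorities. A sketch of that kind of parsing (the "<resource>.<group>=v2,v1" line format is an assumption for illustration; consult the Velero docs for the supported syntax):

package main

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// parsePriorities maps "resource.group" keys to their user-preferred API
// versions, in the order the user listed them.
func parsePriorities(data string) map[string]metav1.APIGroup {
	priorities := map[string]metav1.APIGroup{}
	for _, line := range strings.Split(strings.TrimSpace(data), "\n") {
		key, versions, ok := strings.Cut(line, "=")
		if !ok {
			continue // skip malformed lines
		}
		var gvs []metav1.GroupVersionForDiscovery
		for _, v := range strings.Split(versions, ",") {
			gvs = append(gvs, metav1.GroupVersionForDiscovery{Version: strings.TrimSpace(v)})
		}
		priorities[strings.TrimSpace(key)] = metav1.APIGroup{Versions: gvs}
	}
	return priorities
}

func main() {
	got := parsePriorities("rockbands.music.example.io=v2beta1,v2beta2")
	fmt.Println(got["rockbands.music.example.io"].Versions[0].Version) // v2beta1
}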
Some files were not shown because too many files have changed in this diff.