commit 587bc4f0a5

@@ -0,0 +1,6 @@
+language: go
+
+go:
+- 1.9.x
+
+script: make ci

Makefile | 6
@@ -87,6 +87,7 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
 		VERSION=$(VERSION) \
 		PKG=$(PKG) \
 		BIN=$(BIN) \
+		OUTPUT_DIR=/output/$(GOOS)/$(GOARCH) \
 		./hack/build.sh'"

 TTY := $(shell tty -s && echo "-t")
@@ -181,3 +182,8 @@ container-clean:

 bin-clean:
 	rm -rf .go _output
+
+ci:
+	hack/verify-all.sh
+	hack/test.sh $(SRC_DIRS)
+	GOOS=$(GOOS) GOARCH=$(GOARCH) VERSION=$(VERSION) PKG=$(PKG) BIN=$(BIN) ./hack/build.sh

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash

 # Copyright 2016 The Kubernetes Authors.
 #
@@ -53,7 +53,10 @@ LDFLAGS="-X ${PKG}/pkg/buildinfo.Version=${VERSION}"
 LDFLAGS="${LDFLAGS} -X ${PKG}/pkg/buildinfo.GitSHA=${GIT_SHA}"
 LDFLAGS="${LDFLAGS} -X ${PKG}/pkg/buildinfo.GitTreeState=${GIT_TREE_STATE}"

-OUTPUT=/output/${GOOS}/${GOARCH}/${BIN}
+if [[ -z "${OUTPUT_DIR:-}" ]]; then
+  OUTPUT_DIR=.
+fi
+OUTPUT=${OUTPUT_DIR}/${BIN}
 if [[ "${GOOS}" = "windows" ]]; then
   OUTPUT="${OUTPUT}.exe"
 fi

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash

 # Copyright 2016 The Kubernetes Authors.
 #

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
+ARK_ROOT=$(dirname ${BASH_SOURCE})/..
 BIN=${ARK_ROOT}/_output/bin
 mkdir -p ${BIN}
 go build -o ${BIN}/client-gen ./vendor/k8s.io/kubernetes/cmd/libs/go2idl/client-gen

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
+ARK_ROOT=$(dirname ${BASH_SOURCE})/..
 BIN=${ARK_ROOT}/_output/bin
 mkdir -p ${BIN}

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
+ARK_ROOT=$(dirname ${BASH_SOURCE})/..
 BIN=${ARK_ROOT}/_output/bin
 mkdir -p ${BIN}

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
+ARK_ROOT=$(dirname ${BASH_SOURCE})/..
 BIN=${ARK_ROOT}/_output/bin
 mkdir -p ${BIN}

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
+ARK_ROOT=$(dirname ${BASH_SOURCE})/..
 HACK_DIR=$(dirname "${BASH_SOURCE}")
 DOCS_DIR=${ARK_ROOT}/docs/cli-reference
 TMP_DIR="$(mktemp -d)"

@@ -230,7 +230,7 @@ func TestGetNamespaceIncludesExcludes(t *testing.T) {
 var (
 	v1Group = &metav1.APIResourceList{
 		GroupVersion: "v1",
-		APIResources: []metav1.APIResource{configMapsResource, podsResource},
+		APIResources: []metav1.APIResource{configMapsResource, podsResource, namespacesResource},
 	}

 	configMapsResource = metav1.APIResource{
@@ -266,6 +266,14 @@ var (
 		Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}),
 	}

+	namespacesResource = metav1.APIResource{
+		Name:         "namespaces",
+		SingularName: "namespace",
+		Namespaced:   false,
+		Kind:         "Namespace",
+		Verbs:        metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}),
+	}
+
 	certificatesGroup = &metav1.APIResourceList{
 		GroupVersion: "certificates.k8s.io/v1beta1",
 		APIResources: []metav1.APIResource{certificateSigningRequestsResource},

@@ -103,6 +103,7 @@ type defaultItemBackupper struct {
 }

 var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
+var namespacesGroupResource = schema.GroupResource{Group: "", Resource: "namespaces"}

 // backupItem backs up an individual item to tarWriter. The item may be excluded based on the
 // namespaces IncludesExcludes list.
@@ -127,7 +128,9 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns
 		return nil
 	}

-	if namespace == "" && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
+	// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
+	// false.
+	if namespace == "" && groupResource != namespacesGroupResource && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
 		log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
 		return nil
 	}
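The guard above reduces to a small predicate: a cluster-scoped item is dropped only when the backup explicitly sets IncludeClusterResources to false, with the namespaces resource exempted. A minimal runnable sketch of that decision, using simplified stand-ins rather than the actual Ark types:

```go
package main

import "fmt"

// excludeClusterScopedItem mirrors the item-level guard in the hunk above:
// a cluster-scoped item (empty namespace) is excluded only when the backup
// explicitly sets IncludeClusterResources to false, and the namespaces
// resource is exempted so individual namespaces can still be backed up.
func excludeClusterScopedItem(namespace, resource string, includeClusterResources *bool) bool {
	return namespace == "" &&
		resource != "namespaces" &&
		includeClusterResources != nil &&
		!*includeClusterResources
}

func main() {
	f := false
	fmt.Println(excludeClusterScopedItem("", "persistentvolumes", &f))  // true: excluded
	fmt.Println(excludeClusterScopedItem("", "namespaces", &f))         // false: allowed through
	fmt.Println(excludeClusterScopedItem("", "persistentvolumes", nil)) // false: nil defers the decision
}
```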

@@ -103,6 +103,23 @@ func TestBackupItemSkips(t *testing.T) {
 	}
 }

+func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t *testing.T) {
+	f := false
+	ib := &defaultItemBackupper{
+		backup: &v1.Backup{
+			Spec: v1.BackupSpec{
+				IncludeClusterResources: &f,
+			},
+		},
+		namespaces: collections.NewIncludesExcludes(),
+		resources:  collections.NewIncludesExcludes(),
+	}
+
+	u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
+	err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"})
+	assert.NoError(t, err)
+}
+
 func TestBackupItemNoSkips(t *testing.T) {
 	tests := []struct {
 		name string

@@ -25,6 +25,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	kuberrs "k8s.io/apimachinery/pkg/util/errors"
@@ -122,24 +123,27 @@ func (rb *defaultResourceBackupper) backupResource(

 	log := rb.log.WithField("groupResource", grString)

-	switch {
-	case rb.backup.Spec.IncludeClusterResources == nil:
-		// when IncludeClusterResources == nil (auto), only directly
-		// back up cluster-scoped resources if we're doing a full-cluster
-		// (all namespaces) backup. Note that in the case of a subset of
-		// namespaces being backed up, some related cluster-scoped resources
-		// may still be backed up if triggered by a custom action (e.g. PVC->PV).
-		if !resource.Namespaced && !rb.namespaces.IncludeEverything() {
-			log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
-			return nil
-		}
-	case *rb.backup.Spec.IncludeClusterResources == false:
-		if !resource.Namespaced {
+	clusterScoped := !resource.Namespaced
+
+	// If the resource we are backing up is NOT namespaces, and it is cluster-scoped, check to see if
+	// we should include it based on the IncludeClusterResources setting.
+	if gr != namespacesGroupResource && clusterScoped {
+		if rb.backup.Spec.IncludeClusterResources == nil {
+			if !rb.namespaces.IncludeEverything() {
+				// when IncludeClusterResources == nil (auto), only directly
+				// back up cluster-scoped resources if we're doing a full-cluster
+				// (all namespaces) backup. Note that in the case of a subset of
+				// namespaces being backed up, some related cluster-scoped resources
+				// may still be backed up if triggered by a custom action (e.g. PVC->PV).
+				// If we're processing namespaces themselves, we will not skip here, they may be
+				// filtered out later.
+				log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
+				return nil
+			}
+		} else if !*rb.backup.Spec.IncludeClusterResources {
 			log.Info("Skipping resource because it's cluster-scoped")
 			return nil
 		}
-	case *rb.backup.Spec.IncludeClusterResources == true:
-		// include the resource, no action required
 	}

 	if !rb.resources.ShouldInclude(grString) {
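For reference, the resource-level decision after this refactor can be summarized as a table over three inputs. A runnable sketch under simplified assumptions (plain booleans instead of the real APIResource and IncludesExcludes types):

```go
package main

import "fmt"

// skipResource mirrors the resource-level decision above: the namespaces
// resource is never skipped here, other cluster-scoped resources are skipped
// in auto mode (IncludeClusterResources == nil) unless every namespace is
// included, and are always skipped when the flag is explicitly false.
func skipResource(isNamespaces, clusterScoped, allNamespaces bool, includeClusterResources *bool) bool {
	if isNamespaces || !clusterScoped {
		return false
	}
	if includeClusterResources == nil {
		return !allNamespaces
	}
	return !*includeClusterResources
}

func main() {
	f := false
	fmt.Println(skipResource(false, true, false, nil)) // true: auto mode, partial backup
	fmt.Println(skipResource(false, true, true, nil))  // false: auto mode, full-cluster backup
	fmt.Println(skipResource(false, true, true, &f))   // true: explicitly excluded
	fmt.Println(skipResource(true, true, false, &f))   // false: namespaces are filtered later instead
}
```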
@@ -173,12 +177,50 @@ func (rb *defaultResourceBackupper) backupResource(
 		rb.discoveryHelper,
 	)

-	var namespacesToList []string
-	if resource.Namespaced {
-		namespacesToList = getNamespacesToList(rb.namespaces)
-	} else {
+	namespacesToList := getNamespacesToList(rb.namespaces)
+
+	// Check if we're backing up namespaces, and only certain ones
+	if gr == namespacesGroupResource && namespacesToList[0] != "" {
+		resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
+		if err != nil {
+			return err
+		}
+
+		var labelSelector labels.Selector
+		if rb.backup.Spec.LabelSelector != nil {
+			labelSelector, err = metav1.LabelSelectorAsSelector(rb.backup.Spec.LabelSelector)
+			if err != nil {
+				// This should never happen...
+				return errors.Wrap(err, "invalid label selector")
+			}
+		}
+
+		for _, ns := range namespacesToList {
+			unstructured, err := resourceClient.Get(ns, metav1.GetOptions{})
+			if err != nil {
+				errs = append(errs, errors.Wrap(err, "error getting namespace"))
+				continue
+			}
+
+			labels := labels.Set(unstructured.GetLabels())
+			if labelSelector != nil && !labelSelector.Matches(labels) {
+				log.WithField("name", unstructured.GetName()).Info("skipping item because it does not match the backup's label selector")
+				continue
+			}
+
+			if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
+				errs = append(errs, err)
+			}
+		}
+
+		return kuberrs.NewAggregate(errs)
+	}
+
+	// If we get here, we're backing up something other than namespaces
+	if clusterScoped {
 		namespacesToList = []string{""}
 	}

 	for _, namespace := range namespacesToList {
 		resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
 		if err != nil {
@@ -203,6 +245,17 @@ func (rb *defaultResourceBackupper) backupResource(
 			continue
 		}

+		metadata, err := meta.Accessor(unstructured)
+		if err != nil {
+			errs = append(errs, errors.Wrapf(err, "unable to get a metadata accessor"))
+			continue
+		}
+
+		if gr == namespacesGroupResource && !rb.namespaces.ShouldInclude(metadata.GetName()) {
+			log.WithField("name", metadata.GetName()).Info("skipping namespace because it is excluded")
+			continue
+		}
+
 		if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
 			errs = append(errs, err)
 		}
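The new namespaces path above fetches each included namespace with Get and applies the backup's label selector by hand instead of relying on a server-side List filter. A self-contained sketch of that filtering step using the real apimachinery helpers (the sample selector and namespace labels here are made up):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The backup's selector, as it would arrive in backup.Spec.LabelSelector.
	sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
		MatchLabels: map[string]string{"team": "a"},
	})
	if err != nil {
		panic(err)
	}

	// Labels as they would be read off each namespace fetched via Get.
	nsLabels := map[string]map[string]string{
		"ns-1": {"team": "a"},
		"ns-2": {"team": "b"},
	}

	for name, lbls := range nsLabels {
		if !sel.Matches(labels.Set(lbls)) {
			fmt.Printf("skipping %s: does not match the backup's label selector\n", name)
			continue
		}
		fmt.Printf("backing up %s\n", name)
	}
}
```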

@@ -24,6 +24,7 @@ import (
 	"github.com/heptio/ark/pkg/discovery"
 	"github.com/heptio/ark/pkg/util/collections"
 	arktest "github.com/heptio/ark/pkg/util/test"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,6 +51,7 @@ func TestBackupResource(t *testing.T) {
 		groupVersion            schema.GroupVersion
 		groupResource           schema.GroupResource
 		listResponses           [][]*unstructured.Unstructured
+		getResponses            []*unstructured.Unstructured
 		includeClusterResources *bool
 	}{
 		{
@@ -195,6 +197,22 @@ func TestBackupResource(t *testing.T) {
 			},
 		},
 	},
+	{
+		name:                     "should include specified namespaces if backing up subset of namespaces and --include-cluster-resources=nil",
+		namespaces:               collections.NewIncludesExcludes().Includes("ns-1", "ns-2"),
+		resources:                collections.NewIncludesExcludes(),
+		includeClusterResources:  nil,
+		expectedListedNamespaces: []string{"ns-1", "ns-2"},
+		apiGroup:                 v1Group,
+		apiResource:              namespacesResource,
+		groupVersion:             schema.GroupVersion{Group: "", Version: "v1"},
+		groupResource:            schema.GroupResource{Group: "", Resource: "namespaces"},
+		expectSkip:               false,
+		getResponses: []*unstructured.Unstructured{
+			unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
+			unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
+		},
+	},
 }

 for _, test := range tests {
@@ -271,23 +289,38 @@ func TestBackupResource(t *testing.T) {
 			discoveryHelper,
 		).Return(itemBackupper)

-		for i, namespace := range test.expectedListedNamespaces {
-			client := &arktest.FakeDynamicClient{}
-			defer client.AssertExpectations(t)
-
-			dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil)
-
-			list := &unstructured.UnstructuredList{
-				Items: []unstructured.Unstructured{},
-			}
-			for _, item := range test.listResponses[i] {
-				list.Items = append(list.Items, *item)
-				itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
-			}
-			client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
-		}
+		if len(test.listResponses) > 0 {
+			for i, namespace := range test.expectedListedNamespaces {
+				client := &arktest.FakeDynamicClient{}
+				defer client.AssertExpectations(t)
+
+				dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil)
+
+				list := &unstructured.UnstructuredList{
+					Items: []unstructured.Unstructured{},
+				}
+				for _, item := range test.listResponses[i] {
+					list.Items = append(list.Items, *item)
+					itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
+				}
+				client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
+			}
+		}
+
+		if len(test.getResponses) > 0 {
+			client := &arktest.FakeDynamicClient{}
+			defer client.AssertExpectations(t)
+
+			dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, "").Return(client, nil)
+
+			for i, namespace := range test.expectedListedNamespaces {
+				item := test.getResponses[i]
+				client.On("Get", namespace, metav1.GetOptions{}).Return(item, nil)
+				itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
+			}
+		}

 		err := rb.backupResource(test.apiGroup, test.apiResource)
 		require.NoError(t, err)
 	})
@@ -427,6 +460,180 @@ func TestBackupResourceCohabitation(t *testing.T) {
 	}
 }

+func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
+	backup := &v1.Backup{}
+
+	namespaces := collections.NewIncludesExcludes().Includes("ns-1")
+	resources := collections.NewIncludesExcludes().Includes("*")
+
+	labelSelector := "foo=bar"
+	backedUpItems := map[itemKey]struct{}{}
+
+	dynamicFactory := &arktest.FakeDynamicFactory{}
+	defer dynamicFactory.AssertExpectations(t)
+
+	discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)
+
+	cohabitatingResources := map[string]*cohabitatingResource{}
+
+	actions := map[schema.GroupResource]Action{}
+
+	resourceHooks := []resourceHook{}
+
+	podCommandExecutor := &mockPodCommandExecutor{}
+	defer podCommandExecutor.AssertExpectations(t)
+
+	tarWriter := &fakeTarWriter{}
+
+	rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
+		arktest.NewLogger(),
+		backup,
+		namespaces,
+		resources,
+		labelSelector,
+		dynamicFactory,
+		discoveryHelper,
+		backedUpItems,
+		cohabitatingResources,
+		actions,
+		podCommandExecutor,
+		tarWriter,
+		resourceHooks,
+	).(*defaultResourceBackupper)
+
+	itemBackupperFactory := &mockItemBackupperFactory{}
+	defer itemBackupperFactory.AssertExpectations(t)
+	rb.itemBackupperFactory = itemBackupperFactory
+
+	itemHookHandler := &mockItemHookHandler{}
+	defer itemHookHandler.AssertExpectations(t)
+
+	itemBackupper := &defaultItemBackupper{
+		backup:          backup,
+		namespaces:      namespaces,
+		resources:       resources,
+		backedUpItems:   backedUpItems,
+		actions:         actions,
+		tarWriter:       tarWriter,
+		resourceHooks:   resourceHooks,
+		dynamicFactory:  dynamicFactory,
+		discoveryHelper: discoveryHelper,
+		itemHookHandler: itemHookHandler,
+	}
+
+	itemBackupperFactory.On("newItemBackupper",
+		backup,
+		namespaces,
+		resources,
+		backedUpItems,
+		actions,
+		podCommandExecutor,
+		tarWriter,
+		resourceHooks,
+		dynamicFactory,
+		discoveryHelper,
+	).Return(itemBackupper)
+
+	client := &arktest.FakeDynamicClient{}
+	defer client.AssertExpectations(t)
+
+	coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
+	dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
+	ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
+	client.On("Get", "ns-1", metav1.GetOptions{}).Return(ns1, nil)
+
+	itemHookHandler.On("handleHooks", mock.Anything, schema.GroupResource{Group: "", Resource: "namespaces"}, ns1, resourceHooks).Return(nil)
+
+	err := rb.backupResource(v1Group, namespacesResource)
+	require.NoError(t, err)
+
+	require.Len(t, tarWriter.headers, 1)
+	assert.Equal(t, "resources/namespaces/cluster/ns-1.json", tarWriter.headers[0].Name)
+}
+
+func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
+	backup := &v1.Backup{}
+
+	namespaces := collections.NewIncludesExcludes().Excludes("ns-1")
+	resources := collections.NewIncludesExcludes().Includes("*")
+
+	labelSelector := "foo=bar"
+	backedUpItems := map[itemKey]struct{}{}
+
+	dynamicFactory := &arktest.FakeDynamicFactory{}
+	defer dynamicFactory.AssertExpectations(t)
+
+	discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)
+
+	cohabitatingResources := map[string]*cohabitatingResource{}
+
+	actions := map[schema.GroupResource]Action{}
+
+	resourceHooks := []resourceHook{}
+
+	podCommandExecutor := &mockPodCommandExecutor{}
+	defer podCommandExecutor.AssertExpectations(t)
+
+	tarWriter := &fakeTarWriter{}
+
+	rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
+		arktest.NewLogger(),
+		backup,
+		namespaces,
+		resources,
+		labelSelector,
+		dynamicFactory,
+		discoveryHelper,
+		backedUpItems,
+		cohabitatingResources,
+		actions,
+		podCommandExecutor,
+		tarWriter,
+		resourceHooks,
+	).(*defaultResourceBackupper)
+
+	itemBackupperFactory := &mockItemBackupperFactory{}
+	defer itemBackupperFactory.AssertExpectations(t)
+	rb.itemBackupperFactory = itemBackupperFactory
+
+	itemHookHandler := &mockItemHookHandler{}
+	defer itemHookHandler.AssertExpectations(t)
+
+	itemBackupper := &mockItemBackupper{}
+	defer itemBackupper.AssertExpectations(t)
+
+	itemBackupperFactory.On("newItemBackupper",
+		backup,
+		namespaces,
+		resources,
+		backedUpItems,
+		actions,
+		podCommandExecutor,
+		tarWriter,
+		resourceHooks,
+		dynamicFactory,
+		discoveryHelper,
+	).Return(itemBackupper)
+
+	client := &arktest.FakeDynamicClient{}
+	defer client.AssertExpectations(t)
+
+	coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
+	dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
+
+	ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
+	ns2 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
+	list := &unstructured.UnstructuredList{
+		Items: []unstructured.Unstructured{*ns1, *ns2},
+	}
+	client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
+
+	itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, namespacesGroupResource).Return(nil)
+
+	err := rb.backupResource(v1Group, namespacesResource)
+	require.NoError(t, err)
+}
+
 type mockItemBackupperFactory struct {
 	mock.Mock
 }
@@ -456,289 +663,3 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
 	)
 	return args.Get(0).(ItemBackupper)
 }
-
-/*
-func TestBackupResource2(t *testing.T) {
-	tests := []struct {
-		name                            string
-		resourceIncludesExcludes        *collections.IncludesExcludes
-		resourceGroup                   string
-		resourceVersion                 string
-		resourceGV                      string
-		resourceName                    string
-		resourceNamespaced              bool
-		namespaceIncludesExcludes       *collections.IncludesExcludes
-		expectedListedNamespaces        []string
-		lists                           []string
-		labelSelector                   string
-		actions                         map[string]Action
-		expectedActionIDs               map[string][]string
-		deploymentsBackedUp             bool
-		expectedDeploymentsBackedUp     bool
-		networkPoliciesBackedUp         bool
-		expectedNetworkPoliciesBackedUp bool
-	}{
-		{
-			name:                     "should not include resource",
-			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
-			resourceGV:               "v1",
-			resourceName:             "secrets",
-			resourceNamespaced:       true,
-		},
-		{
-			name:                        "should skip deployments.extensions if we've seen deployments.apps",
-			resourceIncludesExcludes:    collections.NewIncludesExcludes().Includes("*"),
-			resourceGV:                  "extensions/v1beta1",
-			resourceName:                "deployments",
-			resourceNamespaced:          true,
-			deploymentsBackedUp:         true,
-			expectedDeploymentsBackedUp: true,
-		},
-		{
-			name:                        "should skip deployments.apps if we've seen deployments.extensions",
-			resourceIncludesExcludes:    collections.NewIncludesExcludes().Includes("*"),
-			resourceGV:                  "apps/v1beta1",
-			resourceName:                "deployments",
-			resourceNamespaced:          true,
-			deploymentsBackedUp:         true,
-			expectedDeploymentsBackedUp: true,
-		},
-		{
-			name:                            "should skip networkpolicies.extensions if we've seen networkpolicies.networking.k8s.io",
-			resourceIncludesExcludes:        collections.NewIncludesExcludes().Includes("*"),
-			resourceGV:                      "extensions/v1beta1",
-			resourceName:                    "networkpolicies",
-			resourceNamespaced:              true,
-			networkPoliciesBackedUp:         true,
-			expectedNetworkPoliciesBackedUp: true,
-		},
-		{
-			name:                            "should skip networkpolicies.networking.k8s.io if we've seen networkpolicies.extensions",
-			resourceIncludesExcludes:        collections.NewIncludesExcludes().Includes("*"),
-			resourceGV:                      "networking.k8s.io/v1",
-			resourceName:                    "networkpolicies",
-			resourceNamespaced:              true,
-			networkPoliciesBackedUp:         true,
-			expectedNetworkPoliciesBackedUp: true,
-		},
-		{
-			name:                      "list per namespace when not including *",
-			resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("*"),
-			resourceGroup:             "apps",
-			resourceVersion:           "v1beta1",
-			resourceGV:                "apps/v1beta1",
-			resourceName:              "deployments",
-			resourceNamespaced:        true,
-			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a", "b"),
-			expectedListedNamespaces:  []string{"a", "b"},
-			lists: []string{
-				`{
-					"apiVersion": "apps/v1beta1",
-					"kind": "DeploymentList",
-					"items": [
-						{
-							"metadata": {
-								"namespace": "a",
-								"name": "1"
-							}
-						}
-					]
-				}`,
-				`{
-					"apiVersion": "apps/v1beta1v1",
-					"kind": "DeploymentList",
-					"items": [
-						{
-							"metadata": {
-								"namespace": "b",
-								"name": "2"
-							}
-						}
-					]
-				}`,
-			},
-			expectedDeploymentsBackedUp: true,
-		},
-		{
-			name:                      "list all namespaces when including *",
-			resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("*"),
-			resourceGroup:             "networking.k8s.io",
-			resourceVersion:           "v1",
-			resourceGV:                "networking.k8s.io/v1",
-			resourceName:              "networkpolicies",
-			resourceNamespaced:        true,
-			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
-			expectedListedNamespaces:  []string{""},
-			lists: []string{
-				`{
-					"apiVersion": "networking.k8s.io/v1",
-					"kind": "NetworkPolicyList",
-					"items": [
-						{
-							"metadata": {
-								"namespace": "a",
-								"name": "1"
-							}
-						}
-					]
-				}`,
-			},
-			expectedNetworkPoliciesBackedUp: true,
-		},
-		{
-			name:                      "list all namespaces when cluster-scoped, even with namespace includes",
-			resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("*"),
-			resourceGroup:             "certificates.k8s.io",
-			resourceVersion:           "v1beta1",
-			resourceGV:                "certificates.k8s.io/v1beta1",
-			resourceName:              "certificatesigningrequests",
-			resourceNamespaced:        false,
-			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
-			expectedListedNamespaces:  []string{""},
-			labelSelector:             "a=b",
-			lists: []string{
-				`{
-					"apiVersion": "certifiaces.k8s.io/v1beta1",
-					"kind": "CertificateSigningRequestList",
-					"items": [
-						{
-							"metadata": {
-								"name": "1",
-								"labels": {
-									"a": "b"
-								}
-							}
-						}
-					]
-				}`,
-			},
-		},
-		{
-			name:                      "use a custom action",
-			resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("*"),
-			resourceGroup:             "certificates.k8s.io",
-			resourceVersion:           "v1beta1",
-			resourceGV:                "certificates.k8s.io/v1beta1",
-			resourceName:              "certificatesigningrequests",
-			resourceNamespaced:        false,
-			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
-			expectedListedNamespaces:  []string{""},
-			labelSelector:             "a=b",
-			lists: []string{
-				`{
-					"apiVersion": "certificates.k8s.io/v1beta1",
-					"kind": "CertificateSigningRequestList",
-					"items": [
-						{
-							"metadata": {
-								"name": "1",
-								"labels": {
-									"a": "b"
-								}
-							}
-						}
-					]
-				}`,
-			},
-			actions: map[string]Action{
-				"certificatesigningrequests": &fakeAction{},
-				"other":                      &fakeAction{},
-			},
-			expectedActionIDs: map[string][]string{
-				"certificatesigningrequests": {"1"},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			var labelSelector *metav1.LabelSelector
-			if test.labelSelector != "" {
-				s, err := metav1.ParseToLabelSelector(test.labelSelector)
-				require.NoError(t, err)
-				labelSelector = s
-			}
-
-			log, _ := testlogger.NewNullLogger()
-
-			ctx := &backupContext{
-				backup: &v1.Backup{
-					Spec: v1.BackupSpec{
-						LabelSelector: labelSelector,
-					},
-				},
-				resourceIncludesExcludes:  test.resourceIncludesExcludes,
-				namespaceIncludesExcludes: test.namespaceIncludesExcludes,
-				deploymentsBackedUp:       test.deploymentsBackedUp,
-				networkPoliciesBackedUp:   test.networkPoliciesBackedUp,
-				logger:                    log,
-			}
-
-			group := &metav1.APIResourceList{
-				GroupVersion: test.resourceGV,
-			}
-
-			resource := metav1.APIResource{Name: test.resourceName, Namespaced: test.resourceNamespaced}
-
-			itemBackupper := &mockItemBackupper{}
-
-			var actualActionIDs map[string][]string
-
-			dynamicFactory := &arktest.FakeDynamicFactory{}
-			gvr := schema.GroupVersionResource{Group: test.resourceGroup, Version: test.resourceVersion}
-			gr := schema.GroupResource{Group: test.resourceGroup, Resource: test.resourceName}
-			for i, namespace := range test.expectedListedNamespaces {
-				obj := toRuntimeObject(t, test.lists[i])
-
-				client := &arktest.FakeDynamicClient{}
-				client.On("List", metav1.ListOptions{LabelSelector: test.labelSelector}).Return(obj, nil)
-				dynamicFactory.On("ClientForGroupVersionResource", gvr, resource, namespace).Return(client, nil)
-
-				action := test.actions[test.resourceName]
-
-				list, err := meta.ExtractList(obj)
-				require.NoError(t, err)
-				for i := range list {
-					item := list[i].(*unstructured.Unstructured)
-					itemBackupper.On("backupItem", ctx, item, gr).Return(nil)
-					if action != nil {
-						a, err := meta.Accessor(item)
-						require.NoError(t, err)
-						ns := a.GetNamespace()
-						name := a.GetName()
-						id := ns
-						if id != "" {
-							id += "/"
-						}
-						id += name
-						if actualActionIDs == nil {
-							actualActionIDs = make(map[string][]string)
-						}
-						actualActionIDs[test.resourceName] = append(actualActionIDs[test.resourceName], id)
-					}
-				}
-			}
-
-			resources := map[schema.GroupVersionResource]schema.GroupVersionResource{
-				schema.GroupVersionResource{Resource: "certificatesigningrequests"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
-				schema.GroupVersionResource{Resource: "other"}:                      schema.GroupVersionResource{Group: "somegroup", Version: "someversion", Resource: "otherthings"},
-			}
-			discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)
-
-			podCommandExecutor := &arktest.PodCommandExecutor{}
-			defer podCommandExecutor.AssertExpectations(t)
-
-			kb, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, test.actions, podCommandExecutor)
-			require.NoError(t, err)
-			backupper := kb.(*kubernetesBackupper)
-			backupper.itemBackupper = itemBackupper
-
-			err = backupper.backupResource(ctx, group, resource)
-
-			assert.Equal(t, test.expectedDeploymentsBackedUp, ctx.deploymentsBackedUp)
-			assert.Equal(t, test.expectedNetworkPoliciesBackedUp, ctx.networkPoliciesBackedUp)
-			assert.Equal(t, test.expectedActionIDs, actualActionIDs)
-		})
-	}
-}
-*/
-

@@ -49,7 +49,7 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
 				backups.Items = append(backups.Items, *backup)
 			}
 		} else {
-			backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(metav1.ListOptions{})
+			backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(listOptions)
 			cmd.CheckError(err)
 		}

@@ -49,7 +49,7 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
 				restores.Items = append(restores.Items, *restore)
 			}
 		} else {
-			restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(metav1.ListOptions{})
+			restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(listOptions)
 			cmd.CheckError(err)
 		}

@@ -49,7 +49,7 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
 				schedules.Items = append(schedules.Items, *schedule)
 			}
 		} else {
-			schedules, err = arkClient.ArkV1().Schedules(api.DefaultNamespace).List(metav1.ListOptions{})
+			schedules, err = arkClient.ArkV1().Schedules(api.DefaultNamespace).List(listOptions)
 			cmd.CheckError(err)
 		}

@@ -351,7 +351,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe
 			continue
 		}

-		w, e := ctx.restoreResource(resource.String(), nsName, nsPath)
+		w, e := ctx.restoreResource(resource.String(), mappedNsName, nsPath)
 		merge(&warnings, &w)
 		merge(&errs, &e)
 	}
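The fix above passes the mapped namespace name into restoreResource rather than the name from the backup. A minimal sketch of the lookup this implies (the helper name here is hypothetical; the real mapping comes from restore.Spec.NamespaceMapping):

```go
package main

import "fmt"

// mappedNamespace returns the target namespace for a restored item: the
// remapped name when the restore defines a mapping for the original
// namespace, otherwise the original name unchanged.
func mappedNamespace(original string, mapping map[string]string) string {
	if target, ok := mapping[original]; ok {
		return target
	}
	return original
}

func main() {
	mapping := map[string]string{"ns-1": "ns-2"} // as in the test below
	fmt.Println(mappedNamespace("ns-1", mapping)) // ns-2
	fmt.Println(mappedNamespace("other", mapping)) // other
}
```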

@@ -292,6 +292,53 @@ func TestRestorePriority(t *testing.T) {
 	}
 }

+func TestNamespaceRemapping(t *testing.T) {
+	var (
+		baseDir              = "bak"
+		restore              = &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, NamespaceMapping: map[string]string{"ns-1": "ns-2"}}}
+		prioritizedResources = []schema.GroupResource{{Resource: "configmaps"}}
+		labelSelector        = labels.NewSelector()
+		fileSystem           = newFakeFileSystem().WithFile("bak/resources/configmaps/namespaces/ns-1/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON())
+		expectedNS           = "ns-2"
+		expectedObjs         = toUnstructured(newTestConfigMap().WithNamespace("ns-2").WithArkLabel("").ConfigMap)
+	)
+
+	resourceClient := &FakeDynamicClient{}
+	for i := range expectedObjs {
+		resourceClient.On("Create", &expectedObjs[i]).Return(&expectedObjs[i], nil)
+	}
+
+	dynamicFactory := &FakeDynamicFactory{}
+	resource := metav1.APIResource{Name: "configmaps", Namespaced: true}
+	gv := schema.GroupVersion{Group: "", Version: "v1"}
+	dynamicFactory.On("ClientForGroupVersionResource", gv, resource, expectedNS).Return(resourceClient, nil)
+
+	log, _ := testlogger.NewNullLogger()
+
+	ctx := &context{
+		dynamicFactory:       dynamicFactory,
+		fileSystem:           fileSystem,
+		selector:             labelSelector,
+		namespaceClient:      &fakeNamespaceClient{},
+		prioritizedResources: prioritizedResources,
+		restore:              restore,
+		backup:               &api.Backup{},
+		logger:               log,
+	}
+
+	warnings, errors := ctx.restoreFromDir(baseDir)
+
+	assert.Empty(t, warnings.Ark)
+	assert.Empty(t, warnings.Cluster)
+	assert.Empty(t, warnings.Namespaces)
+	assert.Empty(t, errors.Ark)
+	assert.Empty(t, errors.Cluster)
+	assert.Empty(t, errors.Namespaces)
+
+	dynamicFactory.AssertExpectations(t)
+	resourceClient.AssertExpectations(t)
+}
+
 func TestRestoreResourceForNamespace(t *testing.T) {
 	var (
 		trueVal = true

@@ -45,7 +45,10 @@ func (sr *serviceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restor
 		return nil, nil, err
 	}

-	delete(spec, "clusterIP")
+	// Since clusterIP is an optional key, we can ignore 'not found' errors. Also assuming it was a string already.
+	if val, _ := collections.GetString(spec, "clusterIP"); val != "None" {
+		delete(spec, "clusterIP")
+	}

 	ports, err := collections.GetSlice(obj.UnstructuredContent(), "spec.ports")
 	if err != nil {
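The restorer now preserves `clusterIP: None`, which is how headless Services are declared, while still clearing concrete cluster IPs so restored Services get fresh ones. A minimal sketch of the same check on a plain map (the real code goes through collections.GetString on an unstructured object):

```go
package main

import "fmt"

// stripClusterIP drops spec.clusterIP so a restored Service is assigned a
// fresh IP, but keeps the literal "None" that marks a headless Service.
func stripClusterIP(spec map[string]interface{}) {
	if ip, _ := spec["clusterIP"].(string); ip != "None" {
		delete(spec, "clusterIP")
	}
}

func main() {
	regular := map[string]interface{}{"clusterIP": "10.0.0.1"}
	headless := map[string]interface{}{"clusterIP": "None"}

	stripClusterIP(regular)
	stripClusterIP(headless)

	fmt.Println(regular)  // map[]: clusterIP removed
	fmt.Println(headless) // map[clusterIP:None]: preserved
}
```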

@@ -42,6 +42,12 @@ func TestServiceRestorerPrepare(t *testing.T) {
 			expectedErr: false,
 			expectedRes: NewTestUnstructured().WithName("svc-1").WithSpec("foo").WithSpecField("ports", []interface{}{}).Unstructured,
 		},
+		{
+			name:        "headless clusterIP should not be deleted from spec",
+			obj:         NewTestUnstructured().WithName("svc-1").WithSpecField("clusterIP", "None").WithSpecField("ports", []interface{}{}).Unstructured,
+			expectedErr: false,
+			expectedRes: NewTestUnstructured().WithName("svc-1").WithSpecField("clusterIP", "None").WithSpecField("ports", []interface{}{}).Unstructured,
+		},
 		{
 			name: "nodePort (only) should be deleted from all spec.ports",
 			obj: NewTestUnstructured().WithName("svc-1").