Restructure backups for resource prioritization.

Previously the directory structure separated resources according to
whether they were cluster or namespace scoped. All cluster resources
were restored first, then all namespace resources. Priority ordering
did not apply across the two groups, so no namespaced resource could
be restored before any cluster-scoped resource.

This restructuring sorts first by resource type, for example:

resources/serviceaccounts/namespaces/ns1.json
resources/nodes/cluster/node1.json
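
A minimal Go sketch of the new scheme, assuming the directory-name constants
mirror api.ResourcesDir, api.NamespaceScopedDir and api.ClusterScopedDir (the
itemPath helper and the sample object names are made up for illustration; the
real logic is backupItem, shown in the diff below):

```go
// Illustrative only: how item paths are composed under the restructured layout.
package main

import (
	"fmt"
	"path/filepath"
)

const (
	resourcesDir       = "resources"  // mirrors api.ResourcesDir
	namespaceScopedDir = "namespaces" // mirrors api.NamespaceScopedDir
	clusterScopedDir   = "cluster"    // mirrors api.ClusterScopedDir
)

// itemPath sorts first on resource type, then on scope.
func itemPath(groupResource, namespace, name string) string {
	if namespace != "" {
		return filepath.Join(resourcesDir, groupResource, namespaceScopedDir, namespace, name+".json")
	}
	return filepath.Join(resourcesDir, groupResource, clusterScopedDir, name+".json")
}

func main() {
	fmt.Println(itemPath("serviceaccounts", "ns1", "default")) // resources/serviceaccounts/namespaces/ns1/default.json
	fmt.Println(itemPath("nodes", "", "node1"))                // resources/nodes/cluster/node1.json
}
```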

This will break old backups, as the new format is not compatible
with the old one; the change was announced on the Google group.

Signed-off-by: Devan Goodwin <dgoodwin@redhat.com>
pull/132/head
Devan Goodwin 2017-10-10 15:43:53 -03:00
parent 4fe50ed782
commit ed0194c09b
6 changed files with 228 additions and 221 deletions


@ -62,24 +62,36 @@ Note that this file includes detailed info about your volume snapshots in the `s
When unzipped, a typical backup directory (e.g. `backup1234.tar.gz`) looks like the following:
```
-cluster/
-    persistentvolumes/
-        pv01.json
-        ...
-namespaces/
-    namespace1/
-        configmaps/
-            myconfigmap.json
-            ...
-        pods
-            mypod.json
-            ...
-        jobs
-            awesome-job.json
-            ...
-        deployments
-            cool-deployment.json
-            ...
-    namespace2/
-        ...
+resources/
+    persistentvolumes/
+        cluster/
+            pv01.json
+            ...
+    configmaps/
+        namespaces/
+            namespace1/
+                myconfigmap.json
+                ...
+            namespace2/
+                ...
+    pods/
+        namespaces/
+            namespace1/
+                mypod.json
+                ...
+            namespace2/
+                ...
+    jobs/
+        namespaces/
+            namespace1/
+                awesome-job.json
+                ...
+            namespace2/
+                ...
+    deployments/
+        namespaces/
+            namespace1/
+                cool-deployment.json
+                ...
+            namespace2/
+                ...
```
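
To make the layout concrete, here is a small sketch of decoding an entry path
back into its parts; parseBackupPath is purely illustrative and not an Ark API:

```go
// Illustrative only: split a backup entry path from the layout above.
package main

import (
	"fmt"
	"strings"
)

// parseBackupPath handles "resources/<type>/cluster/<name>.json" and
// "resources/<type>/namespaces/<ns>/<name>.json".
func parseBackupPath(p string) (resource, namespace, name string, err error) {
	parts := strings.Split(p, "/")
	switch {
	case len(parts) == 4 && parts[0] == "resources" && parts[2] == "cluster":
		return parts[1], "", strings.TrimSuffix(parts[3], ".json"), nil
	case len(parts) == 5 && parts[0] == "resources" && parts[2] == "namespaces":
		return parts[1], parts[3], strings.TrimSuffix(parts[4], ".json"), nil
	}
	return "", "", "", fmt.Errorf("unrecognized backup path: %s", p)
}

func main() {
	for _, p := range []string{
		"resources/persistentvolumes/cluster/pv01.json",
		"resources/configmaps/namespaces/namespace1/myconfigmap.json",
	} {
		resource, ns, name, _ := parseBackupPath(p)
		fmt.Printf("resource=%s namespace=%q name=%s\n", resource, ns, name)
	}
}
```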


@ -21,6 +21,10 @@ const (
// the Ark server and API objects.
DefaultNamespace = "heptio-ark"
// ResourcesDir is a top-level directory expected in backups which contains sub-directories
// for each resource type in the backup.
ResourcesDir = "resources"
// RestoreLabelKey is the label key that's applied to all resources that
// are created during a restore. This is applied for ease of identification
// of restored resources. The value will be the restore's name.


@ -22,6 +22,7 @@ import (
"encoding/json"
"fmt"
"io"
"path/filepath"
"strings"
"time"
@ -473,9 +474,9 @@ func (ib *realItemBackupper) backupItem(ctx *backupContext, item map[string]inte
	var filePath string
	if namespace != "" {
-		filePath = strings.Join([]string{api.NamespaceScopedDir, namespace, groupResource.String(), name + ".json"}, "/")
+		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
	} else {
-		filePath = strings.Join([]string{api.ClusterScopedDir, groupResource.String(), name + ".json"}, "/")
+		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
	}
	itemBytes, err := json.Marshal(item)
itemBytes, err := json.Marshal(item)


@ -439,14 +439,14 @@ func TestBackupMethod(t *testing.T) {
require.NoError(t, err)
expectedFiles := sets.NewString(
"namespaces/a/configmaps/configMap1.json",
"namespaces/b/configmaps/configMap2.json",
"namespaces/a/roles.rbac.authorization.k8s.io/role1.json",
"resources/configmaps/namespaces/a/configMap1.json",
"resources/configmaps/namespaces/b/configMap2.json",
"resources/roles.rbac.authorization.k8s.io/namespaces/a/role1.json",
// CSRs are not expected because they're unrelated cluster-scoped resources
)
expectedData := map[string]string{
"namespaces/a/configmaps/configMap1.json": `
"resources/configmaps/namespaces/a/configMap1.json": `
{
"apiVersion": "v1",
"kind": "ConfigMap",
@ -458,7 +458,7 @@ func TestBackupMethod(t *testing.T) {
"a": "b"
}
}`,
"namespaces/b/configmaps/configMap2.json": `
"resources/configmaps/namespaces/b/configMap2.json": `
{
"apiVersion": "v1",
"kind": "ConfigMap",
@ -471,7 +471,7 @@ func TestBackupMethod(t *testing.T) {
}
}
`,
"namespaces/a/roles.rbac.authorization.k8s.io/role1.json": `
"resources/roles.rbac.authorization.k8s.io/namespaces/a/role1.json": `
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "Role",
@ -1114,7 +1114,7 @@ func TestBackupItem(t *testing.T) {
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"),
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "namespaces/foo/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json",
},
{
name: "* namespace include",
@ -1122,21 +1122,21 @@ func TestBackupItem(t *testing.T) {
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "namespaces/foo/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json",
},
{
name: "cluster-scoped",
item: `{"metadata":{"name":"bar"}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "cluster/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
},
{
name: "make sure status is deleted",
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "cluster/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
},
{
name: "tar header write error",
@ -1156,7 +1156,7 @@ func TestBackupItem(t *testing.T) {
item: `{"metadata":{"name":"bar"}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "cluster/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
customAction: true,
expectedActionID: "bar",
},
@ -1166,7 +1166,7 @@ func TestBackupItem(t *testing.T) {
item: `{"metadata":{"namespace": "myns", "name":"bar"}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "namespaces/myns/resource.group/bar.json",
expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json",
customAction: true,
expectedActionID: "myns/bar",
},


@ -20,10 +20,10 @@ import (
"archive/tar"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"sort"
@ -257,55 +257,107 @@ func (ctx *context) execute() (api.RestoreResult, api.RestoreResult) {
// restoreFromDir executes a restore based on backup data contained within a local
// directory.
func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
// cluster-scoped
clusterPath := path.Join(dir, api.ClusterScopedDir)
exists, err := ctx.fileSystem.DirExists(clusterPath)
if err != nil {
errors.Cluster = []string{err.Error()}
}
if exists {
w, e := ctx.restoreNamespace("", clusterPath)
merge(&warnings, &w)
merge(&errors, &e)
}
// namespace-scoped
namespacesPath := path.Join(dir, api.NamespaceScopedDir)
exists, err = ctx.fileSystem.DirExists(namespacesPath)
if err != nil {
addArkError(&errors, err)
return warnings, errors
}
if !exists {
return warnings, errors
}
nses, err := ctx.fileSystem.ReadDir(namespacesPath)
if err != nil {
addArkError(&errors, err)
return warnings, errors
}
warnings, errs := api.RestoreResult{}, api.RestoreResult{}
namespaceFilter := collections.NewIncludesExcludes().Includes(ctx.restore.Spec.IncludedNamespaces...).Excludes(ctx.restore.Spec.ExcludedNamespaces...)
for _, ns := range nses {
if !ns.IsDir() {
continue
}
nsPath := path.Join(namespacesPath, ns.Name())
if !namespaceFilter.ShouldInclude(ns.Name()) {
ctx.infof("Skipping namespace %s", ns.Name())
// Make sure the top level "resources" dir exists:
resourcesDir := filepath.Join(dir, api.ResourcesDir)
rde, err := ctx.fileSystem.DirExists(resourcesDir)
if err != nil {
addArkError(&errs, err)
return warnings, errs
}
if !rde {
addArkError(&errs, errors.New("backup does not contain top level resources directory"))
}
resourceDirs, err := ctx.fileSystem.ReadDir(resourcesDir)
if err != nil {
addArkError(&errs, err)
return warnings, errs
}
resourceDirsMap := make(map[string]os.FileInfo)
for _, rscDir := range resourceDirs {
rscName := rscDir.Name()
resourceDirsMap[rscName] = rscDir
}
for _, resource := range ctx.prioritizedResources {
rscDir := resourceDirsMap[resource.String()]
if rscDir == nil {
continue
}
w, e := ctx.restoreNamespace(ns.Name(), nsPath)
resourcePath := filepath.Join(resourcesDir, rscDir.Name())
clusterSubDir := filepath.Join(resourcePath, api.ClusterScopedDir)
clusterSubDirExists, err := ctx.fileSystem.DirExists(clusterSubDir)
if err != nil {
addArkError(&errs, err)
return warnings, errs
}
if clusterSubDirExists {
w, e := ctx.restoreResource(resource.String(), "", clusterSubDir)
merge(&warnings, &w)
merge(&errors, &e)
merge(&errs, &e)
continue
}
return warnings, errors
nsSubDir := filepath.Join(resourcePath, api.NamespaceScopedDir)
nsSubDirExists, err := ctx.fileSystem.DirExists(nsSubDir)
if err != nil {
addArkError(&errs, err)
return warnings, errs
}
if !nsSubDirExists {
continue
}
nsDirs, err := ctx.fileSystem.ReadDir(nsSubDir)
if err != nil {
addArkError(&errs, err)
return warnings, errs
}
for _, nsDir := range nsDirs {
if !nsDir.IsDir() {
continue
}
nsName := nsDir.Name()
nsPath := filepath.Join(nsSubDir, nsName)
if !namespaceFilter.ShouldInclude(nsName) {
ctx.infof("Skipping namespace %s", nsName)
continue
}
// fetch mapped NS name
mappedNsName := nsName
if target, ok := ctx.restore.Spec.NamespaceMapping[nsName]; ok {
mappedNsName = target
}
// ensure namespace exists
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: mappedNsName,
},
}
if _, err := kube.EnsureNamespaceExists(ns, ctx.namespaceClient); err != nil {
addArkError(&errs, err)
continue
}
w, e := ctx.restoreResource(resource.String(), nsName, nsPath)
merge(&warnings, &w)
merge(&errs, &e)
}
}
return warnings, errs
}
// merge combines two RestoreResult objects into one
@ -340,94 +392,38 @@ func addToResult(r *api.RestoreResult, ns string, e error) {
}
}
// restoreNamespace restores the resources from a specified namespace directory in the backup,
// or from the cluster-scoped directory if no namespace is specified.
func (ctx *context) restoreNamespace(nsName, nsPath string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
// restoreResource restores the specified cluster or namespace scoped resource. If namespace is
// empty we are restoring a cluster level resource, otherwise into the specified namespace.
func (ctx *context) restoreResource(resource, namespace, resourcePath string) (api.RestoreResult, api.RestoreResult) {
warnings, errs := api.RestoreResult{}, api.RestoreResult{}
if nsName == "" {
ctx.infof("Restoring cluster-scoped resources")
if namespace != "" {
ctx.infof("Restoring resource '%s' into namespace '%s' from: %s", resource, namespace, resourcePath)
} else {
ctx.infof("Restoring namespace %s", nsName)
ctx.infof("Restoring cluster level resource '%s' from: %s", resource, resourcePath)
}
resourceDirs, err := ctx.fileSystem.ReadDir(nsPath)
if err != nil {
addToResult(&errors, nsName, err)
return warnings, errors
}
resourceDirsMap := make(map[string]os.FileInfo)
for _, rscDir := range resourceDirs {
rscName := rscDir.Name()
resourceDirsMap[rscName] = rscDir
}
if nsName != "" {
// fetch mapped NS name
if target, ok := ctx.restore.Spec.NamespaceMapping[nsName]; ok {
nsName = target
}
// ensure namespace exists
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: nsName,
},
}
if _, err := kube.EnsureNamespaceExists(ns, ctx.namespaceClient); err != nil {
addArkError(&errors, err)
return warnings, errors
}
}
for _, resource := range ctx.prioritizedResources {
rscDir := resourceDirsMap[resource.String()]
if rscDir == nil {
continue
}
resourcePath := path.Join(nsPath, rscDir.Name())
w, e := ctx.restoreResourceForNamespace(nsName, resourcePath)
merge(&warnings, &w)
merge(&errors, &e)
}
return warnings, errors
}
// restoreResourceForNamespace restores the specified resource type for the specified
// namespace (or blank for cluster-scoped resources).
func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
resource := path.Base(resourcePath)
ctx.infof("Restoring resource %v into namespace %v", resource, namespace)
files, err := ctx.fileSystem.ReadDir(resourcePath)
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error reading %q resource directory: %v", resource, err))
return warnings, errors
addToResult(&errs, namespace, fmt.Errorf("error reading %q resource directory: %v", resource, err))
return warnings, errs
}
if len(files) == 0 {
return warnings, errors
return warnings, errs
}
var (
resourceClient client.Dynamic
restorer restorers.ResourceRestorer
waiter *resourceWaiter
groupResource = schema.ParseGroupResource(path.Base(resourcePath))
groupResource = schema.ParseGroupResource(resource)
)
for _, file := range files {
fullPath := filepath.Join(resourcePath, file.Name())
obj, err := ctx.unmarshal(fullPath)
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error decoding %q: %v", fullPath, err))
addToResult(&errs, namespace, fmt.Errorf("error decoding %q: %v", fullPath, err))
continue
}
@ -448,8 +444,8 @@ func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath s
var err error
resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionKind(obj.GroupVersionKind(), resource, namespace)
if err != nil {
addArkError(&errors, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errors
addArkError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
}
restorer = ctx.restorers[groupResource]
@ -463,8 +459,8 @@ func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath s
if restorer.Wait() {
itmWatch, err := resourceClient.Watch(metav1.ListOptions{})
if err != nil {
addArkError(&errors, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errors
addArkError(&errs, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
}
watchChan := itmWatch.ResultChan()
defer itmWatch.Stop()
@ -487,13 +483,13 @@ func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath s
addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", fullPath, warning))
}
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error preparing %s: %v", fullPath, err))
addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", fullPath, err))
continue
}
unstructuredObj, ok := preparedObj.(*unstructured.Unstructured)
if !ok {
addToResult(&errors, namespace, fmt.Errorf("%s: unexpected type %T", fullPath, preparedObj))
addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", fullPath, preparedObj))
continue
}
@ -511,7 +507,7 @@ func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath s
}
if err != nil {
ctx.infof("error restoring %s: %v", unstructuredObj.GetName(), err)
addToResult(&errors, namespace, fmt.Errorf("error restoring %s: %v", fullPath, err))
addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", fullPath, err))
continue
}
@ -522,11 +518,11 @@ func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath s
if waiter != nil {
if err := waiter.Wait(); err != nil {
addArkError(&errors, fmt.Errorf("error waiting for all %v resources to be created in namespace %s: %v", &groupResource, namespace, err))
addArkError(&errs, fmt.Errorf("error waiting for all %v resources to be created in namespace %s: %v", &groupResource, namespace, err))
}
}
return warnings, errors
return warnings, errs
}
// addLabel applies the specified key/value to an object as a label.
@ -604,7 +600,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) {
return "", err
}
target := path.Join(dir, header.Name)
target := filepath.Join(dir, header.Name)
switch header.Typeflag {
case tar.TypeDir:
@ -616,7 +612,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) {
case tar.TypeReg:
// make sure we have the directory created
err := ctx.fileSystem.MkdirAll(path.Dir(target), header.FileInfo().Mode())
err := ctx.fileSystem.MkdirAll(filepath.Dir(target), header.FileInfo().Mode())
if err != nil {
ctx.infof("mkdirall error: %v", err)
return "", err
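
For orientation, a hedged sketch of the restore order the rewritten
restoreFromDir implies: resources are walked in the operator's priority order,
and within each resource the cluster/ entries (if present) are restored before
any namespaces/ entries; restoreOrder and its inputs are illustrative, not Ark
APIs.

```go
// Illustrative only: the visiting order implied by the new layout.
package main

import "fmt"

type entry struct {
	Resource  string
	Namespace string // empty for cluster-scoped entries
}

// restoreOrder walks resources in priority order; within a resource, a
// cluster/ sub-directory wins (the real code continues to the next resource
// after restoring it), otherwise each included namespaces/<ns> sub-directory
// is restored in turn.
func restoreOrder(prioritized []string, clusterScoped map[string]bool, namespaced map[string][]string, includeNS func(string) bool) []entry {
	var order []entry
	for _, r := range prioritized {
		if clusterScoped[r] {
			order = append(order, entry{Resource: r})
			continue
		}
		for _, ns := range namespaced[r] {
			if includeNS(ns) {
				order = append(order, entry{Resource: r, Namespace: ns})
			}
		}
	}
	return order
}

func main() {
	prioritized := []string{"namespaces", "persistentvolumes", "configmaps", "pods"}
	clusterScoped := map[string]bool{"namespaces": true, "persistentvolumes": true}
	namespaced := map[string][]string{"configmaps": {"a", "b"}, "pods": {"a", "b"}}
	exclude := map[string]bool{"b": true} // e.g. ExcludedNamespaces: ["b"]
	for _, e := range restoreOrder(prioritized, clusterScoped, namespaced, func(ns string) bool { return !exclude[ns] }) {
		fmt.Printf("%-18s %s\n", e.Resource, e.Namespace)
	}
}
```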


@ -116,62 +116,58 @@ func TestPrioritizeResources(t *testing.T) {
}
}
func TestRestoreMethod(t *testing.T) {
func TestRestoreNamespaceFiltering(t *testing.T) {
tests := []struct {
name string
fileSystem *fakeFileSystem
baseDir string
restore *api.Restore
expectedReadDirs []string
prioritizedResources []schema.GroupResource
}{
{
name: "cluster comes before namespaced",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces"},
},
{
name: "namespaces dir is not read & does not error if it does not exist",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{}},
expectedReadDirs: []string{"bak/cluster"},
},
{
name: "namespacesToRestore having * restores all namespaces",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"),
fileSystem: newFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"},
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "nodes"},
schema.GroupResource{Resource: "secrets"},
},
},
{
name: "namespacesToRestore properly filters",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"),
fileSystem: newFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"b", "c"}}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces", "bak/namespaces/b", "bak/namespaces/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"},
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "nodes"},
schema.GroupResource{Resource: "secrets"},
},
{
name: "namespacesToRestore properly filters with inclusion filter",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"b", "c"}}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces", "bak/namespaces/b", "bak/namespaces/c"},
},
{
name: "namespacesToRestore properly filters with exclusion filter",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"),
fileSystem: newFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, ExcludedNamespaces: []string{"a"}}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces", "bak/namespaces/b", "bak/namespaces/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"},
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "nodes"},
schema.GroupResource{Resource: "secrets"},
},
},
{
name: "namespacesToRestore properly filters with inclusion & exclusion filters",
fileSystem: newFakeFileSystem().WithDirectories("bak/cluster", "bak/namespaces/a", "bak/namespaces/b", "bak/namespaces/c"),
fileSystem: newFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"a", "b", "c"}, ExcludedNamespaces: []string{"b"}}},
expectedReadDirs: []string{"bak/cluster", "bak/namespaces", "bak/namespaces/a", "bak/namespaces/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/c"},
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "nodes"},
schema.GroupResource{Resource: "secrets"},
},
},
}
@ -184,6 +180,7 @@ func TestRestoreMethod(t *testing.T) {
namespaceClient: &fakeNamespaceClient{},
fileSystem: test.fileSystem,
logger: log,
prioritizedResources: test.prioritizedResources,
}
warnings, errors := ctx.restoreFromDir(test.baseDir)
@ -199,62 +196,59 @@ func TestRestoreMethod(t *testing.T) {
}
}
func TestRestoreNamespace(t *testing.T) {
func TestRestorePriority(t *testing.T) {
tests := []struct {
name string
fileSystem *fakeFileSystem
restore *api.Restore
namespace string
path string
baseDir string
prioritizedResources []schema.GroupResource
expectedErrors api.RestoreResult
expectedReadDirs []string
}{
{
name: "cluster test",
fileSystem: newFakeFileSystem().WithDirectory("bak/cluster/a").WithDirectory("bak/cluster/c"),
namespace: "",
path: "bak/cluster",
fileSystem: newFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"),
baseDir: "bak",
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}},
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "a"},
schema.GroupResource{Resource: "b"},
schema.GroupResource{Resource: "c"},
},
expectedReadDirs: []string{"bak/cluster", "bak/cluster/a", "bak/cluster/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/a/cluster", "bak/resources/c/cluster"},
},
{
name: "resource priorities are applied",
fileSystem: newFakeFileSystem().WithDirectory("bak/cluster/a").WithDirectory("bak/cluster/c"),
namespace: "",
path: "bak/cluster",
fileSystem: newFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"),
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}},
baseDir: "bak",
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "c"},
schema.GroupResource{Resource: "b"},
schema.GroupResource{Resource: "a"},
},
expectedReadDirs: []string{"bak/cluster", "bak/cluster/c", "bak/cluster/a"},
expectedReadDirs: []string{"bak/resources", "bak/resources/c/cluster", "bak/resources/a/cluster"},
},
{
name: "basic namespace",
fileSystem: newFakeFileSystem().WithDirectory("bak/namespaces/ns-1/a").WithDirectory("bak/namespaces/ns-1/c"),
restore: &api.Restore{Spec: api.RestoreSpec{NamespaceMapping: make(map[string]string)}},
namespace: "ns-1",
path: "bak/namespaces/ns-1",
fileSystem: newFakeFileSystem().WithDirectory("bak/resources/a/namespaces/ns-1").WithDirectory("bak/resources/c/namespaces/ns-1"),
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}},
baseDir: "bak",
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "a"},
schema.GroupResource{Resource: "b"},
schema.GroupResource{Resource: "c"},
},
expectedReadDirs: []string{"bak/namespaces/ns-1", "bak/namespaces/ns-1/a", "bak/namespaces/ns-1/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/a/namespaces", "bak/resources/a/namespaces/ns-1", "bak/resources/c/namespaces", "bak/resources/c/namespaces/ns-1"},
},
{
name: "error in a single resource doesn't terminate restore immediately, but is returned",
fileSystem: newFakeFileSystem().
WithFile("bak/namespaces/ns-1/a/invalid-json.json", []byte("invalid json")).
WithDirectory("bak/namespaces/ns-1/c"),
restore: &api.Restore{Spec: api.RestoreSpec{NamespaceMapping: make(map[string]string)}},
namespace: "ns-1",
path: "bak/namespaces/ns-1",
WithFile("bak/resources/a/namespaces/ns-1/invalid-json.json", []byte("invalid json")).
WithDirectory("bak/resources/c/namespaces/ns-1"),
restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}},
baseDir: "bak",
prioritizedResources: []schema.GroupResource{
schema.GroupResource{Resource: "a"},
schema.GroupResource{Resource: "b"},
@ -262,10 +256,10 @@ func TestRestoreNamespace(t *testing.T) {
},
expectedErrors: api.RestoreResult{
Namespaces: map[string][]string{
"ns-1": {"error decoding \"bak/namespaces/ns-1/a/invalid-json.json\": invalid character 'i' looking for beginning of value"},
"ns-1": {"error decoding \"bak/resources/a/namespaces/ns-1/invalid-json.json\": invalid character 'i' looking for beginning of value"},
},
},
expectedReadDirs: []string{"bak/namespaces/ns-1", "bak/namespaces/ns-1/a", "bak/namespaces/ns-1/c"},
expectedReadDirs: []string{"bak/resources", "bak/resources/a/namespaces", "bak/resources/a/namespaces/ns-1", "bak/resources/c/namespaces", "bak/resources/c/namespaces/ns-1"},
},
}
@ -281,7 +275,7 @@ func TestRestoreNamespace(t *testing.T) {
logger: log,
}
warnings, errors := ctx.restoreNamespace(test.namespace, test.path)
warnings, errors := ctx.restoreFromDir(test.baseDir)
assert.Empty(t, warnings.Ark)
assert.Empty(t, warnings.Cluster)
@ -431,7 +425,7 @@ func TestRestoreResourceForNamespace(t *testing.T) {
logger: log,
}
warnings, errors := ctx.restoreResourceForNamespace(test.namespace, test.resourcePath)
warnings, errors := ctx.restoreResource("configmaps", test.namespace, test.resourcePath)
assert.Empty(t, warnings.Ark)
assert.Empty(t, warnings.Cluster)