Add pod exec backup hooks

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>

parent efcb32059a
commit 901f8e1302
@@ -53,8 +53,70 @@ type BackupSpec struct {
 	// IncludeClusterResources specifies whether cluster-scoped resources
 	// should be included for consideration in the backup.
 	IncludeClusterResources *bool `json:"includeClusterResources"`
+
+	// Hooks represent custom behaviors that should be executed at different phases of the backup.
+	Hooks BackupHooks `json:"hooks"`
 }
+
+// BackupHooks contains custom behaviors that should be executed at different phases of the backup.
+type BackupHooks struct {
+	// Resources are hooks that should be executed when backing up individual instances of a resource.
+	Resources []BackupResourceHookSpec `json:"resources"`
+}
+
+// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on
+// the rules defined for namespaces, resources, and label selector.
+type BackupResourceHookSpec struct {
+	// Name is the name of this hook.
+	Name string `json:"name"`
+
+	// IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies
+	// to all namespaces.
+	IncludedNamespaces []string `json:"includedNamespaces"`
+
+	// ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
+	ExcludedNamespaces []string `json:"excludedNamespaces"`
+
+	// IncludedResources specifies the resources to which this hook spec applies. If empty, it applies
+	// to all resources.
+	IncludedResources []string `json:"includedResources"`
+
+	// ExcludedResources specifies the resources to which this hook spec does not apply.
+	ExcludedResources []string `json:"excludedResources"`
+
+	// LabelSelector, if specified, filters the resources to which this hook spec applies.
+	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+
+	// Hooks is a list of BackupResourceHooks to execute.
+	Hooks []BackupResourceHook `json:"hooks"`
+}
+
+// BackupResourceHook defines a hook for a resource.
+type BackupResourceHook struct {
+	// Exec defines an exec hook.
+	Exec *ExecHook `json:"exec"`
+}
+
+// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod.
+type ExecHook struct {
+	// Container is the container in the pod where the command should be executed. If not specified,
+	// the pod's first container is used.
+	Container string `json:"container"`
+
+	// Command is the command and arguments to execute.
+	Command []string `json:"command"`
+
+	// OnError specifies how Ark should behave if it encounters an error executing this hook.
+	OnError HookErrorMode `json:"onError"`
+
+	// Timeout defines the maximum amount of time Ark should wait for the hook to complete before
+	// considering the execution a failure.
+	Timeout metav1.Duration `json:"timeout"`
+}
+
+// HookErrorMode defines how Ark should treat an error from a hook.
+type HookErrorMode string
+
+const (
+	// HookErrorModeContinue means that an error from a hook is acceptable, and the backup can
+	// proceed.
+	HookErrorModeContinue HookErrorMode = "Continue"
+
+	// HookErrorModeFail means that an error from a hook is problematic, and the backup should be in
+	// error.
+	HookErrorModeFail HookErrorMode = "Fail"
+)
+
 // BackupPhase is a string representation of the lifecycle phase
 // of an Ark backup.
 type BackupPhase string
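For orientation, a Backup that uses the new hook types might be constructed roughly as follows. This is a hedged sketch, not part of the commit: the namespace, label selector, container, and command values are invented, and the ObjectMeta/Spec wrapper fields are assumed from the existing Ark API.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

// buildBackupWithHooks is a hypothetical helper: before pods labeled app=postgres in
// the "production" namespace are written to the backup tarball, their data directory
// is frozen via the pod exec API.
func buildBackupWithHooks() *api.Backup {
	return &api.Backup{
		ObjectMeta: metav1.ObjectMeta{Namespace: "heptio-ark", Name: "nightly"},
		Spec: api.BackupSpec{
			IncludedNamespaces: []string{"production"},
			Hooks: api.BackupHooks{
				Resources: []api.BackupResourceHookSpec{
					{
						Name:              "freeze-postgres",
						IncludedResources: []string{"pods"},
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{"app": "postgres"},
						},
						Hooks: []api.BackupResourceHook{
							{
								Exec: &api.ExecHook{
									Container: "postgres",
									Command:   []string{"/sbin/fsfreeze", "--freeze", "/var/lib/postgresql/data"},
									OnError:   api.HookErrorModeFail,
									Timeout:   metav1.Duration{Duration: 30 * time.Second},
								},
							},
						},
					},
				},
			},
		},
	}
}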
@@ -19,17 +19,12 @@ package backup
 import (
 	"archive/tar"
 	"compress/gzip"
-	"encoding/json"
 	"fmt"
 	"io"
-	"path/filepath"
-	"strings"
-	"time"

 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"

-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -39,6 +34,7 @@ import (
 	"github.com/heptio/ark/pkg/client"
 	"github.com/heptio/ark/pkg/discovery"
 	"github.com/heptio/ark/pkg/util/collections"
+	kubeutil "github.com/heptio/ark/pkg/util/kube"
 )

 // Backupper performs backups.
@@ -50,28 +46,27 @@ type Backupper interface {

 // kubernetesBackupper implements Backupper.
 type kubernetesBackupper struct {
 	dynamicFactory  client.DynamicFactory
 	discoveryHelper discovery.Helper
 	actions         map[schema.GroupResource]Action
-	itemBackupper   itemBackupper
+	podCommandExecutor podCommandExecutor
+
+	groupBackupperFactory groupBackupperFactory
 }

-var _ Backupper = &kubernetesBackupper{}
-
-// ActionContext contains contextual information for actions.
-type ActionContext struct {
-	logger *logrus.Logger
-}
-
-func (ac ActionContext) infof(msg string, args ...interface{}) {
-	ac.logger.Infof(msg, args...)
+// ResourceIdentifier describes a single item by its group, resource, namespace, and name.
+type ResourceIdentifier struct {
+	schema.GroupResource
+	Namespace string
+	Name      string
 }

 // Action is an actor that performs an operation on an individual item being backed up.
 type Action interface {
-	// Execute is invoked on an item being backed up. If an error is returned, the Backup is marked as
-	// failed.
-	Execute(ctx *backupContext, item map[string]interface{}, backupper itemBackupper) error
+	// Execute allows the Action to perform arbitrary logic with the item being backed up and the
+	// backup itself. Implementations may return additional ResourceIdentifiers that indicate specific
+	// items that also need to be backed up.
+	Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error)
 }

 type itemKey struct {
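A quick illustration of the revised Action contract (a sketch only, not code from this commit): an action now receives a structured log entry, the unstructured item, and the Backup, and may return additional ResourceIdentifiers for the backupper to fetch and back up. The imports (logrus, runtime, meta, schema, and the ark v1 package as api) are assumed to match what this package already uses; the ConfigMap pairing is an invented example.

// hypotheticalConfigMapAction asks the backupper to also back up a ConfigMap with the
// same namespace and name as every item it is registered for.
type hypotheticalConfigMapAction struct{}

func (a *hypotheticalConfigMapAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) {
	// meta.Accessor gives generic access to metadata.name/namespace on the unstructured item.
	metadata, err := meta.Accessor(item)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	log.Info("requesting companion configmap in the backup")

	return []ResourceIdentifier{
		{
			GroupResource: schema.GroupResource{Group: "", Resource: "configmaps"},
			Namespace:     metadata.GetNamespace(),
			Name:          metadata.GetName(),
		},
	}, nil
}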
@@ -89,6 +84,7 @@ func NewKubernetesBackupper(
 	discoveryHelper discovery.Helper,
 	dynamicFactory client.DynamicFactory,
 	actions map[string]Action,
+	podCommandExecutor podCommandExecutor,
 ) (Backupper, error) {
 	resolvedActions, err := resolveActions(discoveryHelper, actions)
 	if err != nil {

@@ -96,10 +92,12 @@ func NewKubernetesBackupper(
 	}

 	return &kubernetesBackupper{
 		discoveryHelper: discoveryHelper,
 		dynamicFactory:  dynamicFactory,
 		actions:         resolvedActions,
-		itemBackupper:   &realItemBackupper{},
+		podCommandExecutor: podCommandExecutor,
+
+		groupBackupperFactory: &defaultGroupBackupperFactory{},
 	}, nil
 }

@@ -122,14 +120,13 @@ func resolveActions(helper discovery.Helper, actions map[string]Action) (map[sch
 // getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
 // discovery helper to resolve them to fully-qualified group-resource names, and returns an
 // IncludesExcludes list.
-func (ctx *backupContext) getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes {
+func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes {
 	resources := collections.GenerateIncludesExcludes(
 		includes,
 		excludes,
 		func(item string) string {
 			gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion(""))
 			if err != nil {
-				ctx.infof("Unable to resolve resource %q: %v", item, err)
 				return ""
 			}

@@ -138,9 +135,6 @@ func (ctx *backupContext) getResourceIncludesExcludes(helper discovery.Helper, i
 		},
 	)

-	ctx.infof("Including resources: %v", strings.Join(resources.GetIncludes(), ", "))
-	ctx.infof("Excluding resources: %v", strings.Join(resources.GetExcludes(), ", "))
-
 	return resources
 }

@@ -150,33 +144,29 @@ func getNamespaceIncludesExcludes(backup *api.Backup) *collections.IncludesExclu
 	return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
 }

-type backupContext struct {
-	backup *api.Backup
-	w      tarWriter
-	logger *logrus.Logger
-	namespaceIncludesExcludes *collections.IncludesExcludes
-	resourceIncludesExcludes  *collections.IncludesExcludes
-	// deploymentsBackedUp marks whether we've seen and are backing up the deployments resource, from
-	// either the apps or extensions api groups. We only want to back them up once, from whichever api
-	// group we see first.
-	deploymentsBackedUp bool
-	// networkPoliciesBackedUp marks whether we've seen and are backing up the networkpolicies
-	// resource, from either the networking.k8s.io or extensions api groups. We only want to back them
-	// up once, from whichever api group we see first.
-	networkPoliciesBackedUp bool
-
-	actions map[schema.GroupResource]Action
-
-	// backedUpItems keeps track of items that have been backed up already.
-	backedUpItems map[itemKey]struct{}
-
-	dynamicFactory client.DynamicFactory
-
-	discoveryHelper discovery.Helper
-}
-
-func (ctx *backupContext) infof(msg string, args ...interface{}) {
-	ctx.logger.Infof(msg, args...)
+func getResourceHooks(hookSpecs []api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]resourceHook, error) {
+	resourceHooks := make([]resourceHook, 0, len(hookSpecs))
+
+	for _, r := range hookSpecs {
+		h := resourceHook{
+			name:       r.Name,
+			namespaces: collections.NewIncludesExcludes().Includes(r.IncludedNamespaces...).Excludes(r.ExcludedNamespaces...),
+			resources:  getResourceIncludesExcludes(discoveryHelper, r.IncludedResources, r.ExcludedResources),
+			hooks:      r.Hooks,
+		}
+
+		if r.LabelSelector != nil {
+			labelSelector, err := metav1.LabelSelectorAsSelector(r.LabelSelector)
+			if err != nil {
+				return []resourceHook{}, errors.WithStack(err)
+			}
+			h.labelSelector = labelSelector
+		}
+
+		resourceHooks = append(resourceHooks, h)
+	}
+
+	return resourceHooks, nil
 }

 // Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file
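A usage sketch for getResourceHooks (an assumed, in-package example; the hook values are invented): the user-facing specs are resolved once, up front, so an invalid label selector fails the backup before any items are listed or written.

// resolveExampleHooks is hypothetical: it builds one hook spec and resolves it into the
// internal resourceHook form used for per-item matching during the backup.
func resolveExampleHooks(discoveryHelper discovery.Helper) ([]resourceHook, error) {
	hookSpecs := []api.BackupResourceHookSpec{
		{
			Name:               "freeze-db",
			IncludedNamespaces: []string{"production"},
			IncludedResources:  []string{"pods"},
			LabelSelector:      &metav1.LabelSelector{MatchLabels: map[string]string{"app": "database"}},
			Hooks: []api.BackupResourceHook{
				{Exec: &api.ExecHook{Command: []string{"/bin/fsfreeze", "--freeze", "/var/lib/data"}}},
			},
		},
	}

	// A malformed label selector surfaces here; each returned resourceHook carries resolved
	// namespace/resource IncludesExcludes plus the parsed selector.
	return getResourceHooks(hookSpecs, discoveryHelper)
}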
@@ -191,38 +181,64 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
 	gzippedLog := gzip.NewWriter(logFile)
 	defer gzippedLog.Close()

-	var errs []error
+	logger := logrus.New()
+	logger.Out = gzippedLog
+	log := logger.WithField("backup", kubeutil.NamespaceAndName(backup))
+	log.Info("Starting backup")

-	log := logrus.New()
-	log.Out = gzippedLog
+	namespaceIncludesExcludes := getNamespaceIncludesExcludes(backup)
+	log.Infof("Including namespaces: %s", namespaceIncludesExcludes.IncludesString())
+	log.Infof("Excluding namespaces: %s", namespaceIncludesExcludes.ExcludesString())

-	ctx := &backupContext{
-		backup: backup,
-		w:      tw,
-		logger: log,
-		namespaceIncludesExcludes: getNamespaceIncludesExcludes(backup),
-		backedUpItems:             make(map[itemKey]struct{}),
-		actions:                   kb.actions,
-		dynamicFactory:            kb.dynamicFactory,
-		discoveryHelper:           kb.discoveryHelper,
-	}
+	resourceIncludesExcludes := getResourceIncludesExcludes(kb.discoveryHelper, backup.Spec.IncludedResources, backup.Spec.ExcludedResources)
+	log.Infof("Including resources: %s", resourceIncludesExcludes.IncludesString())
+	log.Infof("Excluding resources: %s", resourceIncludesExcludes.ExcludesString())

-	ctx.infof("Starting backup")
+	resourceHooks, err := getResourceHooks(backup.Spec.Hooks.Resources, kb.discoveryHelper)
+	if err != nil {
+		return err
+	}

-	ctx.resourceIncludesExcludes = ctx.getResourceIncludesExcludes(kb.discoveryHelper, backup.Spec.IncludedResources, backup.Spec.ExcludedResources)
+	var labelSelector string
+	if backup.Spec.LabelSelector != nil {
+		labelSelector = metav1.FormatLabelSelector(backup.Spec.LabelSelector)
+	}
+
+	backedUpItems := make(map[itemKey]struct{})
+	var errs []error
+
+	cohabitatingResources := map[string]*cohabitatingResource{
+		"deployments":     newCohabitatingResource("deployments", "extensions", "apps"),
+		"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
+	}
+
+	gb := kb.groupBackupperFactory.newGroupBackupper(
+		log,
+		backup,
+		namespaceIncludesExcludes,
+		resourceIncludesExcludes,
+		labelSelector,
+		kb.dynamicFactory,
+		kb.discoveryHelper,
+		backedUpItems,
+		cohabitatingResources,
+		kb.actions,
+		kb.podCommandExecutor,
+		tw,
+		resourceHooks,
+	)

 	for _, group := range kb.discoveryHelper.Resources() {
-		ctx.infof("Processing group %s", group.GroupVersion)
-		if err := kb.backupGroup(ctx, group); err != nil {
+		if err := gb.backupGroup(group); err != nil {
 			errs = append(errs, err)
 		}
 	}

-	err := kuberrs.NewAggregate(errs)
+	err = kuberrs.NewAggregate(errs)
 	if err == nil {
-		ctx.infof("Backup completed successfully")
+		log.Infof("Backup completed successfully")
 	} else {
-		ctx.infof("Backup completed with errors: %v", err)
+		log.Infof("Backup completed with errors: %v", err)
 	}

 	return err
@@ -233,272 +249,3 @@ type tarWriter interface {
 	Write([]byte) (int, error)
 	WriteHeader(*tar.Header) error
 }
-
-// backupGroup backs up a single API group.
-func (kb *kubernetesBackupper) backupGroup(ctx *backupContext, group *metav1.APIResourceList) error {
-	var (
-		errs []error
-		pv   *metav1.APIResource
-	)
-
-	processResource := func(resource metav1.APIResource) {
-		ctx.infof("Processing resource %s/%s", group.GroupVersion, resource.Name)
-		if err := kb.backupResource(ctx, group, resource); err != nil {
-			errs = append(errs, err)
-		}
-	}
-
-	for _, resource := range group.APIResources {
-		// do PVs last because if we're also backing up PVCs, we want to backup
-		// PVs within the scope of the PVCs (within the PVC action) to allow
-		// for hooks to run
-		if strings.ToLower(resource.Name) == "persistentvolumes" && strings.ToLower(group.GroupVersion) == "v1" {
-			pvResource := resource
-			pv = &pvResource
-			continue
-		}
-		processResource(resource)
-	}
-
-	if pv != nil {
-		processResource(*pv)
-	}
-
-	return kuberrs.NewAggregate(errs)
-}
-
-const (
-	appsDeploymentsResource           = "deployments.apps"
-	extensionsDeploymentsResource     = "deployments.extensions"
-	networkingNetworkPoliciesResource = "networkpolicies.networking.k8s.io"
-	extensionsNetworkPoliciesResource = "networkpolicies.extensions"
-)
-
-// backupResource backs up all the objects for a given group-version-resource.
-func (kb *kubernetesBackupper) backupResource(
-	ctx *backupContext,
-	group *metav1.APIResourceList,
-	resource metav1.APIResource,
-) error {
-	var errs []error
-
-	gv, err := schema.ParseGroupVersion(group.GroupVersion)
-	if err != nil {
-		return errors.Wrapf(err, "error parsing GroupVersion %s", group.GroupVersion)
-	}
-	gvr := schema.GroupVersionResource{Group: gv.Group, Version: gv.Version}
-	gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name}
-	grString := gr.String()
-
-	switch {
-	case ctx.backup.Spec.IncludeClusterResources == nil:
-		// when IncludeClusterResources == nil (auto), only directly
-		// back up cluster-scoped resources if we're doing a full-cluster
-		// (all namespaces) backup. Note that in the case of a subset of
-		// namespaces being backed up, some related cluster-scoped resources
-		// may still be backed up if triggered by a custom action (e.g. PVC->PV).
-		if !resource.Namespaced && !ctx.namespaceIncludesExcludes.IncludeEverything() {
-			ctx.infof("Skipping resource %s because it's cluster-scoped and only specific namespaces are included in the backup", grString)
-			return nil
-		}
-	case *ctx.backup.Spec.IncludeClusterResources == false:
-		if !resource.Namespaced {
-			ctx.infof("Skipping resource %s because it's cluster-scoped", grString)
-			return nil
-		}
-	case *ctx.backup.Spec.IncludeClusterResources == true:
-		// include the resource, no action required
-	}
-
-	if !ctx.resourceIncludesExcludes.ShouldInclude(grString) {
-		ctx.infof("Resource %s is excluded", grString)
-		return nil
-	}
-
-	shouldBackup := func(gr, gr1, gr2 string, backedUp *bool) bool {
-		// if it's neither of the specified dupe group-resources, back it up
-		if gr != gr1 && gr != gr2 {
-			return true
-		}
-
-		// if it hasn't been backed up yet, back it up
-		if !*backedUp {
-			*backedUp = true
-			return true
-		}
-
-		// else, don't back it up, and log why
-		var other string
-		switch gr {
-		case gr1:
-			other = gr2
-		case gr2:
-			other = gr1
-		}
-
-		ctx.infof("Skipping resource %q because it's a duplicate of %q", gr, other)
-		return false
-	}
-
-	if !shouldBackup(grString, appsDeploymentsResource, extensionsDeploymentsResource, &ctx.deploymentsBackedUp) {
-		return nil
-	}
-
-	if !shouldBackup(grString, networkingNetworkPoliciesResource, extensionsNetworkPoliciesResource, &ctx.networkPoliciesBackedUp) {
-		return nil
-	}
-
-	var namespacesToList []string
-	if resource.Namespaced {
-		namespacesToList = getNamespacesToList(ctx.namespaceIncludesExcludes)
-	} else {
-		namespacesToList = []string{""}
-	}
-	for _, namespace := range namespacesToList {
-		resourceClient, err := kb.dynamicFactory.ClientForGroupVersionResource(gvr, resource, namespace)
-		if err != nil {
-			return err
-		}
-
-		labelSelector := ""
-		if ctx.backup.Spec.LabelSelector != nil {
-			labelSelector = metav1.FormatLabelSelector(ctx.backup.Spec.LabelSelector)
-		}
-		unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector})
-		if err != nil {
-			return errors.WithStack(err)
-		}
-
-		// do the backup
-		items, err := meta.ExtractList(unstructuredList)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-
-		for _, item := range items {
-			unstructured, ok := item.(runtime.Unstructured)
-			if !ok {
-				errs = append(errs, errors.Errorf("unexpected type %T", item))
-				continue
-			}
-
-			obj := unstructured.UnstructuredContent()
-
-			if err := kb.itemBackupper.backupItem(ctx, obj, gr); err != nil {
-				errs = append(errs, err)
-			}
-		}
-	}
-
-	return kuberrs.NewAggregate(errs)
-}
-
-// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
-// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
-// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
-func getNamespacesToList(ie *collections.IncludesExcludes) []string {
-	if ie == nil {
-		return []string{""}
-	}
-
-	if ie.ShouldInclude("*") {
-		// "" means all namespaces
-		return []string{""}
-	}
-
-	var list []string
-	for _, i := range ie.GetIncludes() {
-		if ie.ShouldInclude(i) {
-			list = append(list, i)
-		}
-	}
-
-	return list
-}
-
-type itemBackupper interface {
-	backupItem(ctx *backupContext, item map[string]interface{}, groupResource schema.GroupResource) error
-}
-
-type realItemBackupper struct{}
-
-// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
-// namespaces IncludesExcludes list.
-func (ib *realItemBackupper) backupItem(ctx *backupContext, item map[string]interface{}, groupResource schema.GroupResource) error {
-	name, err := collections.GetString(item, "metadata.name")
-	if err != nil {
-		return err
-	}
-
-	namespace, err := collections.GetString(item, "metadata.namespace")
-	// a non-nil error is assumed to be due to a cluster-scoped item
-	if err == nil && !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) {
-		ctx.infof("Excluding item %s because namespace %s is excluded", name, namespace)
-		return nil
-	}
-
-	if namespace == "" && ctx.backup.Spec.IncludeClusterResources != nil && *ctx.backup.Spec.IncludeClusterResources == false {
-		ctx.infof("Excluding item %s because resource %s is cluster-scoped and IncludeClusterResources is false", name, groupResource.String())
-		return nil
-	}
-
-	if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
-		ctx.infof("Excluding item %s because resource %s is excluded", name, groupResource.String())
-		return nil
-	}
-
-	key := itemKey{
-		resource:  groupResource.String(),
-		namespace: namespace,
-		name:      name,
-	}
-
-	if _, exists := ctx.backedUpItems[key]; exists {
-		ctx.infof("Skipping item %s because it's already been backed up.", name)
-		return nil
-	}
-	ctx.backedUpItems[key] = struct{}{}
-
-	// Never save status
-	delete(item, "status")
-
-	if action, hasAction := ctx.actions[groupResource]; hasAction {
-		ctx.infof("Executing action on %s, ns=%s, name=%s", groupResource.String(), namespace, name)
-
-		if err := action.Execute(ctx, item, ib); err != nil {
-			return err
-		}
-	}
-
-	ctx.infof("Backing up resource=%s, ns=%s, name=%s", groupResource.String(), namespace, name)
-
-	var filePath string
-	if namespace != "" {
-		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
-	} else {
-		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
-	}
-
-	itemBytes, err := json.Marshal(item)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-
-	hdr := &tar.Header{
-		Name:     filePath,
-		Size:     int64(len(itemBytes)),
-		Typeflag: tar.TypeReg,
-		Mode:     0755,
-		ModTime:  time.Now(),
-	}
-
-	if err := ctx.w.WriteHeader(hdr); err != nil {
-		return errors.WithStack(err)
-	}
-
-	if _, err := ctx.w.Write(itemBytes); err != nil {
-		return errors.WithStack(err)
-	}
-
-	return nil
-}
@@ -18,10 +18,12 @@ package backup
 import (
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"

-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"

+	"github.com/heptio/ark/pkg/apis/ark/v1"
 	"github.com/heptio/ark/pkg/util/collections"
 )

@@ -30,51 +32,29 @@ import (
 type backupPVAction struct {
 }

-var _ Action = &backupPVAction{}
-
 func NewBackupPVAction() Action {
 	return &backupPVAction{}
 }

+var pvGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumes"}
+
 // Execute finds the PersistentVolume referenced by the provided
 // PersistentVolumeClaim and backs it up
-func (a *backupPVAction) Execute(ctx *backupContext, pvc map[string]interface{}, backupper itemBackupper) error {
-	pvcName, err := collections.GetString(pvc, "metadata.name")
-	if err != nil {
-		ctx.infof("unable to get metadata.name for PersistentVolumeClaim: %v", err)
-		return err
-	}
+func (a *backupPVAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *v1.Backup) ([]ResourceIdentifier, error) {
+	log.Info("Executing backupPVAction")
+	var additionalItems []ResourceIdentifier
+
+	pvc := item.UnstructuredContent()

 	volumeName, err := collections.GetString(pvc, "spec.volumeName")
 	if err != nil {
-		ctx.infof("unable to get spec.volumeName for PersistentVolumeClaim %s: %v", pvcName, err)
-		return err
+		return additionalItems, errors.WithMessage(err, "unable to get spec.volumeName")
 	}

-	gvr, resource, err := ctx.discoveryHelper.ResourceFor(schema.GroupVersionResource{Resource: "persistentvolumes"})
-	if err != nil {
-		ctx.infof("error getting GroupVersionResource for PersistentVolumes: %v", err)
-		return err
-	}
-	gr := gvr.GroupResource()
-
-	client, err := ctx.dynamicFactory.ClientForGroupVersionResource(gvr, resource, "")
-	if err != nil {
-		ctx.infof("error getting client for GroupVersionResource=%s, Resource=%s: %v", gvr.String(), resource, err)
-		return err
-	}
-
-	pv, err := client.Get(volumeName, metav1.GetOptions{})
-	if err != nil {
-		ctx.infof("error getting PersistentVolume %s: %v", volumeName, err)
-		return errors.WithStack(err)
-	}
-
-	ctx.infof("backing up PersistentVolume %s for PersistentVolumeClaim %s", volumeName, pvcName)
-	if err := backupper.backupItem(ctx, pv.UnstructuredContent(), gr); err != nil {
-		ctx.infof("error backing up PersistentVolume %s: %v", volumeName, err)
-		return err
-	}
-
-	return nil
+	additionalItems = append(additionalItems, ResourceIdentifier{
+		GroupResource: pvGroupResource,
+		Name:          volumeName,
+	})
+
+	return additionalItems, nil
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 Heptio Inc.
+Copyright 2017 the Heptio Ark contributors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -19,77 +19,30 @@ package backup
 import (
 	"testing"

-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	testutil "github.com/heptio/ark/pkg/util/test"
-	testlogger "github.com/sirupsen/logrus/hooks/test"
+	"github.com/heptio/ark/pkg/apis/ark/v1"
+	arktest "github.com/heptio/ark/pkg/util/test"
+
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )

 func TestBackupPVAction(t *testing.T) {
-	tests := []struct {
-		name        string
-		item        map[string]interface{}
-		volumeName  string
-		expectedErr bool
-	}{
-		{
-			name: "execute PV backup in normal case",
-			item: map[string]interface{}{
-				"metadata": map[string]interface{}{"name": "pvc-1"},
-				"spec":     map[string]interface{}{"volumeName": "pv-1"},
-			},
-			volumeName:  "pv-1",
-			expectedErr: false,
-		},
-		{
-			name: "error when PVC has no metadata.name",
-			item: map[string]interface{}{
-				"metadata": map[string]interface{}{},
-				"spec":     map[string]interface{}{"volumeName": "pv-1"},
-			},
-			expectedErr: true,
-		},
-		{
-			name: "error when PVC has no spec.volumeName",
-			item: map[string]interface{}{
-				"metadata": map[string]interface{}{"name": "pvc-1"},
-				"spec":     map[string]interface{}{},
-			},
-			expectedErr: true,
+	pvc := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"spec": map[string]interface{}{},
 		},
 	}

-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			var (
-				discoveryHelper = testutil.NewFakeDiscoveryHelper(true, nil)
-				dynamicFactory  = &testutil.FakeDynamicFactory{}
-				dynamicClient   = &testutil.FakeDynamicClient{}
-				testLogger, _   = testlogger.NewNullLogger()
-				ctx             = &backupContext{discoveryHelper: discoveryHelper, dynamicFactory: dynamicFactory, logger: testLogger}
-				backupper       = &fakeItemBackupper{}
-				action          = NewBackupPVAction()
-				pv              = &unstructured.Unstructured{}
-				pvGVR           = schema.GroupVersionResource{Resource: "persistentvolumes"}
-			)
-
-			dynamicFactory.On("ClientForGroupVersionResource",
-				pvGVR,
-				metav1.APIResource{Name: "persistentvolumes"},
-				"",
-			).Return(dynamicClient, nil)
-
-			dynamicClient.On("Get", test.volumeName, metav1.GetOptions{}).Return(pv, nil)
-
-			backupper.On("backupItem", ctx, pv.UnstructuredContent(), pvGVR.GroupResource()).Return(nil)
-
-			// method under test
-			res := action.Execute(ctx, test.item, backupper)
-
-			assert.Equal(t, test.expectedErr, res != nil)
-		})
-	}
+	backup := &v1.Backup{}
+
+	a := NewBackupPVAction()
+
+	additional, err := a.Execute(arktest.NewLogger(), pvc, backup)
+	assert.EqualError(t, err, "unable to get spec.volumeName: key volumeName not found")
+
+	pvc.Object["spec"].(map[string]interface{})["volumeName"] = "myVolume"
+	additional, err = a.Execute(arktest.NewLogger(), pvc, backup)
+	require.NoError(t, err)
+	require.Len(t, additional, 1)
+	assert.Equal(t, ResourceIdentifier{GroupResource: pvGroupResource, Name: "myVolume"}, additional[0])
 }
File diff suppressed because it is too large
@@ -0,0 +1,151 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"strings"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"
)

type groupBackupperFactory interface {
	newGroupBackupper(
		log *logrus.Entry,
		backup *v1.Backup,
		namespaces, resources *collections.IncludesExcludes,
		labelSelector string,
		dynamicFactory client.DynamicFactory,
		discoveryHelper discovery.Helper,
		backedUpItems map[itemKey]struct{},
		cohabitatingResources map[string]*cohabitatingResource,
		actions map[schema.GroupResource]Action,
		podCommandExecutor podCommandExecutor,
		tarWriter tarWriter,
		resourceHooks []resourceHook,
	) groupBackupper
}

type defaultGroupBackupperFactory struct{}

func (f *defaultGroupBackupperFactory) newGroupBackupper(
	log *logrus.Entry,
	backup *v1.Backup,
	namespaces, resources *collections.IncludesExcludes,
	labelSelector string,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
) groupBackupper {
	return &defaultGroupBackupper{
		log:                   log,
		backup:                backup,
		namespaces:            namespaces,
		resources:             resources,
		labelSelector:         labelSelector,
		dynamicFactory:        dynamicFactory,
		discoveryHelper:       discoveryHelper,
		backedUpItems:         backedUpItems,
		cohabitatingResources: cohabitatingResources,
		actions:               actions,
		podCommandExecutor:    podCommandExecutor,
		tarWriter:             tarWriter,
		resourceHooks:         resourceHooks,

		resourceBackupperFactory: &defaultResourceBackupperFactory{},
	}
}

type groupBackupper interface {
	backupGroup(group *metav1.APIResourceList) error
}

type defaultGroupBackupper struct {
	log                      *logrus.Entry
	backup                   *v1.Backup
	namespaces, resources    *collections.IncludesExcludes
	labelSelector            string
	dynamicFactory           client.DynamicFactory
	discoveryHelper          discovery.Helper
	backedUpItems            map[itemKey]struct{}
	cohabitatingResources    map[string]*cohabitatingResource
	actions                  map[schema.GroupResource]Action
	podCommandExecutor       podCommandExecutor
	tarWriter                tarWriter
	resourceHooks            []resourceHook
	resourceBackupperFactory resourceBackupperFactory
}

// backupGroup backs up a single API group.
func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) error {
	var (
		errs []error
		pv   *metav1.APIResource
		log  = gb.log.WithField("group", group.GroupVersion)
		rb   = gb.resourceBackupperFactory.newResourceBackupper(
			log,
			gb.backup,
			gb.namespaces,
			gb.resources,
			gb.labelSelector,
			gb.dynamicFactory,
			gb.discoveryHelper,
			gb.backedUpItems,
			gb.cohabitatingResources,
			gb.actions,
			gb.podCommandExecutor,
			gb.tarWriter,
			gb.resourceHooks,
		)
	)

	log.Infof("Backing up group")

	processResource := func(resource metav1.APIResource) {
		if err := rb.backupResource(group, resource); err != nil {
			errs = append(errs, err)
		}
	}

	for _, resource := range group.APIResources {
		// do PVs last because if we're also backing up PVCs, we want to backup PVs within the scope of
		// the PVCs (within the PVC action) to allow for hooks to run
		if strings.ToLower(resource.Name) == "persistentvolumes" && strings.ToLower(group.GroupVersion) == "v1" {
			pvResource := resource
			pv = &pvResource
			continue
		}
		processResource(resource)
	}

	if pv != nil {
		processResource(*pv)
	}

	return kuberrs.NewAggregate(errs)
}
@@ -0,0 +1,182 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"testing"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	arktest "github.com/heptio/ark/pkg/util/test"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestBackupGroup(t *testing.T) {
	backup := &v1.Backup{}

	namespaces := collections.NewIncludesExcludes().Includes("a")
	resources := collections.NewIncludesExcludes().Includes("b")
	labelSelector := "foo=bar"

	dynamicFactory := &arktest.FakeDynamicFactory{}
	defer dynamicFactory.AssertExpectations(t)

	discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

	backedUpItems := map[itemKey]struct{}{
		{resource: "a", namespace: "b", name: "c"}: struct{}{},
	}

	cohabitatingResources := map[string]*cohabitatingResource{
		"a": {
			resource:       "a",
			groupResource1: schema.GroupResource{Group: "g1", Resource: "a"},
			groupResource2: schema.GroupResource{Group: "g2", Resource: "a"},
		},
	}

	actions := map[schema.GroupResource]Action{
		schema.GroupResource{Group: "", Resource: "pods"}: &fakeAction{},
	}

	podCommandExecutor := &mockPodCommandExecutor{}
	defer podCommandExecutor.AssertExpectations(t)

	tarWriter := &fakeTarWriter{}

	resourceHooks := []resourceHook{
		{name: "myhook"},
	}

	gb := (&defaultGroupBackupperFactory{}).newGroupBackupper(
		arktest.NewLogger(),
		backup,
		namespaces,
		resources,
		labelSelector,
		dynamicFactory,
		discoveryHelper,
		backedUpItems,
		cohabitatingResources,
		actions,
		podCommandExecutor,
		tarWriter,
		resourceHooks,
	).(*defaultGroupBackupper)

	resourceBackupperFactory := &mockResourceBackupperFactory{}
	defer resourceBackupperFactory.AssertExpectations(t)
	gb.resourceBackupperFactory = resourceBackupperFactory

	resourceBackupper := &mockResourceBackupper{}
	defer resourceBackupper.AssertExpectations(t)

	resourceBackupperFactory.On("newResourceBackupper",
		mock.Anything,
		backup,
		namespaces,
		resources,
		labelSelector,
		dynamicFactory,
		discoveryHelper,
		backedUpItems,
		cohabitatingResources,
		actions,
		podCommandExecutor,
		tarWriter,
		resourceHooks,
	).Return(resourceBackupper)

	group := &metav1.APIResourceList{
		GroupVersion: "v1",
		APIResources: []metav1.APIResource{
			{Name: "persistentvolumes"},
			{Name: "pods"},
			{Name: "persistentvolumeclaims"},
		},
	}

	expectedOrder := []string{"pods", "persistentvolumeclaims", "persistentvolumes"}
	var actualOrder []string

	runFunc := func(args mock.Arguments) {
		actualOrder = append(actualOrder, args.Get(1).(metav1.APIResource).Name)
	}

	resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "pods"}).Return(nil).Run(runFunc)
	resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "persistentvolumeclaims"}).Return(nil).Run(runFunc)
	resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "persistentvolumes"}).Return(nil).Run(runFunc)

	err := gb.backupGroup(group)
	require.NoError(t, err)

	// make sure PVs were last
	assert.Equal(t, expectedOrder, actualOrder)
}

type mockResourceBackupperFactory struct {
	mock.Mock
}

func (rbf *mockResourceBackupperFactory) newResourceBackupper(
	log *logrus.Entry,
	backup *v1.Backup,
	namespaces *collections.IncludesExcludes,
	resources *collections.IncludesExcludes,
	labelSelector string,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
) resourceBackupper {
	args := rbf.Called(
		log,
		backup,
		namespaces,
		resources,
		labelSelector,
		dynamicFactory,
		discoveryHelper,
		backedUpItems,
		cohabitatingResources,
		actions,
		podCommandExecutor,
		tarWriter,
		resourceHooks,
	)
	return args.Get(0).(resourceBackupper)
}

type mockResourceBackupper struct {
	mock.Mock
}

func (rb *mockResourceBackupper) backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error {
	args := rb.Called(group, resource)
	return args.Error(0)
}
@@ -0,0 +1,218 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"archive/tar"
	"encoding/json"
	"path/filepath"
	"time"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type itemBackupperFactory interface {
	newItemBackupper(
		backup *api.Backup,
		namespaces, resources *collections.IncludesExcludes,
		backedUpItems map[itemKey]struct{},
		actions map[schema.GroupResource]Action,
		podCommandExecutor podCommandExecutor,
		tarWriter tarWriter,
		resourceHooks []resourceHook,
		dynamicFactory client.DynamicFactory,
		discoveryHelper discovery.Helper,
	) ItemBackupper
}

type defaultItemBackupperFactory struct{}

func (f *defaultItemBackupperFactory) newItemBackupper(
	backup *api.Backup,
	namespaces, resources *collections.IncludesExcludes,
	backedUpItems map[itemKey]struct{},
	actions map[schema.GroupResource]Action,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
) ItemBackupper {
	ib := &defaultItemBackupper{
		backup:          backup,
		namespaces:      namespaces,
		resources:       resources,
		backedUpItems:   backedUpItems,
		actions:         actions,
		tarWriter:       tarWriter,
		resourceHooks:   resourceHooks,
		dynamicFactory:  dynamicFactory,
		discoveryHelper: discoveryHelper,

		itemHookHandler: &defaultItemHookHandler{
			podCommandExecutor: podCommandExecutor,
		},
	}

	// this is for testing purposes
	ib.additionalItemBackupper = ib

	return ib
}

type ItemBackupper interface {
	backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error
}

type defaultItemBackupper struct {
	backup          *api.Backup
	namespaces      *collections.IncludesExcludes
	resources       *collections.IncludesExcludes
	backedUpItems   map[itemKey]struct{}
	actions         map[schema.GroupResource]Action
	tarWriter       tarWriter
	resourceHooks   []resourceHook
	dynamicFactory  client.DynamicFactory
	discoveryHelper discovery.Helper

	itemHookHandler         itemHookHandler
	additionalItemBackupper ItemBackupper
}

var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}

// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error {
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	namespace := metadata.GetNamespace()
	name := metadata.GetName()

	log := logger.WithField("name", name)
	if namespace != "" {
		log = log.WithField("namespace", namespace)
	}

	// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that
	// backupItem can be invoked by a custom action.
	if namespace != "" && !ib.namespaces.ShouldInclude(namespace) {
		log.Info("Excluding item because namespace is excluded")
		return nil
	}

	if namespace == "" && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
		log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
		return nil
	}

	if !ib.resources.ShouldInclude(groupResource.String()) {
		log.Info("Excluding item because resource is excluded")
		return nil
	}

	key := itemKey{
		resource:  groupResource.String(),
		namespace: namespace,
		name:      name,
	}

	if _, exists := ib.backedUpItems[key]; exists {
		log.Info("Skipping item because it's already been backed up.")
		return nil
	}
	ib.backedUpItems[key] = struct{}{}

	log.Info("Backing up resource")

	item := obj.UnstructuredContent()
	// Never save status
	delete(item, "status")

	if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks); err != nil {
		return err
	}

	if action, found := ib.actions[groupResource]; found {
		log.Info("Executing custom action")

		if additionalItemIdentifiers, err := action.Execute(log, obj, ib.backup); err == nil {
			for _, additionalItem := range additionalItemIdentifiers {
				gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
				if err != nil {
					return err
				}

				client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)
				if err != nil {
					return err
				}

				additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
				if err != nil {
					return err
				}

				ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource())
			}
		} else {
			return errors.Wrap(err, "error executing custom action")
		}
	}

	var filePath string
	if namespace != "" {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
	} else {
		filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
	}

	itemBytes, err := json.Marshal(item)
	if err != nil {
		return errors.WithStack(err)
	}

	hdr := &tar.Header{
		Name:     filePath,
		Size:     int64(len(itemBytes)),
		Typeflag: tar.TypeReg,
		Mode:     0755,
		ModTime:  time.Now(),
	}

	if err := ib.tarWriter.WriteHeader(hdr); err != nil {
		return errors.WithStack(err)
	}

	if _, err := ib.tarWriter.Write(itemBytes); err != nil {
		return errors.WithStack(err)
	}

	return nil
}
@@ -0,0 +1,384 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"archive/tar"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/util/collections"
	arktest "github.com/heptio/ark/pkg/util/test"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestBackupItemSkips(t *testing.T) {
	tests := []struct {
		testName      string
		namespace     string
		name          string
		namespaces    *collections.IncludesExcludes
		groupResource schema.GroupResource
		resources     *collections.IncludesExcludes
		backedUpItems map[itemKey]struct{}
	}{
		{
			testName:   "namespace not in includes list",
			namespace:  "ns",
			name:       "foo",
			namespaces: collections.NewIncludesExcludes().Includes("a"),
		},
		{
			testName:   "namespace in excludes list",
			namespace:  "ns",
			name:       "foo",
			namespaces: collections.NewIncludesExcludes().Excludes("ns"),
		},
		{
			testName:      "resource not in includes list",
			namespace:     "ns",
			name:          "foo",
			groupResource: schema.GroupResource{Group: "foo", Resource: "bar"},
			namespaces:    collections.NewIncludesExcludes(),
			resources:     collections.NewIncludesExcludes().Includes("a.b"),
		},
		{
			testName:      "resource in excludes list",
			namespace:     "ns",
			name:          "foo",
			groupResource: schema.GroupResource{Group: "foo", Resource: "bar"},
			namespaces:    collections.NewIncludesExcludes(),
			resources:     collections.NewIncludesExcludes().Excludes("bar.foo"),
		},
		{
			testName:      "resource already backed up",
			namespace:     "ns",
			name:          "foo",
			groupResource: schema.GroupResource{Group: "foo", Resource: "bar"},
			namespaces:    collections.NewIncludesExcludes(),
			resources:     collections.NewIncludesExcludes(),
			backedUpItems: map[itemKey]struct{}{
				{resource: "bar.foo", namespace: "ns", name: "foo"}: struct{}{},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.testName, func(t *testing.T) {
			ib := &defaultItemBackupper{
				namespaces:    test.namespaces,
				resources:     test.resources,
				backedUpItems: test.backedUpItems,
			}
|
u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name))
|
||||||
|
err := ib.backupItem(arktest.NewLogger(), u, test.groupResource)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupItemNoSkips(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
item string
|
||||||
|
namespaceIncludesExcludes *collections.IncludesExcludes
|
||||||
|
expectError bool
|
||||||
|
expectExcluded bool
|
||||||
|
expectedTarHeaderName string
|
||||||
|
tarWriteError bool
|
||||||
|
tarHeaderWriteError bool
|
||||||
|
customAction bool
|
||||||
|
expectedActionID string
|
||||||
|
customActionAdditionalItemIdentifiers []ResourceIdentifier
|
||||||
|
customActionAdditionalItems []runtime.Unstructured
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "explicit namespace include",
|
||||||
|
item: `{"metadata":{"namespace":"foo","name":"bar"}}`,
|
||||||
|
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"),
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "* namespace include",
|
||||||
|
item: `{"metadata":{"namespace":"foo","name":"bar"}}`,
|
||||||
|
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cluster-scoped",
|
||||||
|
item: `{"metadata":{"name":"bar"}}`,
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "make sure status is deleted",
|
||||||
|
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "tar header write error",
|
||||||
|
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
|
||||||
|
expectError: true,
|
||||||
|
tarHeaderWriteError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "tar write error",
|
||||||
|
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
|
||||||
|
expectError: true,
|
||||||
|
tarWriteError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "action invoked - cluster-scoped",
|
||||||
|
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
|
||||||
|
item: `{"metadata":{"name":"bar"}}`,
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
|
||||||
|
customAction: true,
|
||||||
|
expectedActionID: "bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "action invoked - namespaced",
|
||||||
|
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
|
||||||
|
item: `{"metadata":{"namespace": "myns", "name":"bar"}}`,
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json",
|
||||||
|
customAction: true,
|
||||||
|
expectedActionID: "myns/bar",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "action invoked - additional items",
|
||||||
|
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
|
||||||
|
item: `{"metadata":{"namespace": "myns", "name":"bar"}}`,
|
||||||
|
expectError: false,
|
||||||
|
expectExcluded: false,
|
||||||
|
expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json",
|
||||||
|
customAction: true,
|
||||||
|
expectedActionID: "myns/bar",
|
||||||
|
customActionAdditionalItemIdentifiers: []ResourceIdentifier{
|
||||||
|
{
|
||||||
|
GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"},
|
||||||
|
Namespace: "ns1",
|
||||||
|
Name: "n1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"},
|
||||||
|
Namespace: "ns2",
|
||||||
|
Name: "n2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
customActionAdditionalItems: []runtime.Unstructured{
|
||||||
|
unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
|
||||||
|
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
var (
|
||||||
|
actions map[schema.GroupResource]Action
|
||||||
|
action *fakeAction
|
||||||
|
backup = &v1.Backup{}
|
||||||
|
groupResource = schema.ParseGroupResource("resource.group")
|
||||||
|
backedUpItems = make(map[itemKey]struct{})
|
||||||
|
resources = collections.NewIncludesExcludes()
|
||||||
|
w = &fakeTarWriter{}
|
||||||
|
)
|
||||||
|
|
||||||
|
item, err := getAsMap(test.item)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
namespaces := test.namespaceIncludesExcludes
|
||||||
|
if namespaces == nil {
|
||||||
|
namespaces = collections.NewIncludesExcludes()
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.tarHeaderWriteError {
|
||||||
|
w.writeHeaderError = errors.New("error")
|
||||||
|
}
|
||||||
|
if test.tarWriteError {
|
||||||
|
w.writeError = errors.New("error")
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.customAction {
|
||||||
|
action = &fakeAction{
|
||||||
|
additionalItems: test.customActionAdditionalItemIdentifiers,
|
||||||
|
}
|
||||||
|
actions = map[schema.GroupResource]Action{
|
||||||
|
groupResource: action,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resourceHooks := []resourceHook{}
|
||||||
|
|
||||||
|
podCommandExecutor := &mockPodCommandExecutor{}
|
||||||
|
defer podCommandExecutor.AssertExpectations(t)
|
||||||
|
|
||||||
|
dynamicFactory := &arktest.FakeDynamicFactory{}
|
||||||
|
defer dynamicFactory.AssertExpectations(t)
|
||||||
|
|
||||||
|
discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)
|
||||||
|
|
||||||
|
b := (&defaultItemBackupperFactory{}).newItemBackupper(
|
||||||
|
backup,
|
||||||
|
namespaces,
|
||||||
|
resources,
|
||||||
|
backedUpItems,
|
||||||
|
actions,
|
||||||
|
podCommandExecutor,
|
||||||
|
w,
|
||||||
|
resourceHooks,
|
||||||
|
dynamicFactory,
|
||||||
|
discoveryHelper,
|
||||||
|
).(*defaultItemBackupper)
|
||||||
|
|
||||||
|
// make sure the podCommandExecutor was set correctly in the real hook handler
|
||||||
|
assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor)
|
||||||
|
|
||||||
|
itemHookHandler := &mockItemHookHandler{}
|
||||||
|
defer itemHookHandler.AssertExpectations(t)
|
||||||
|
b.itemHookHandler = itemHookHandler
|
||||||
|
|
||||||
|
additionalItemBackupper := &mockItemBackupper{}
|
||||||
|
defer additionalItemBackupper.AssertExpectations(t)
|
||||||
|
b.additionalItemBackupper = additionalItemBackupper
|
||||||
|
|
||||||
|
obj := &unstructured.Unstructured{Object: item}
|
||||||
|
itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks).Return(nil)
|
||||||
|
|
||||||
|
for i, item := range test.customActionAdditionalItemIdentifiers {
|
||||||
|
itemClient := &arktest.FakeDynamicClient{}
|
||||||
|
defer itemClient.AssertExpectations(t)
|
||||||
|
|
||||||
|
dynamicFactory.On("ClientForGroupVersionResource", item.GroupResource.WithVersion("").GroupVersion(), metav1.APIResource{Name: item.Resource}, item.Namespace).Return(itemClient, nil)
|
||||||
|
|
||||||
|
itemClient.On("Get", item.Name, metav1.GetOptions{}).Return(test.customActionAdditionalItems[i], nil)
|
||||||
|
|
||||||
|
additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = b.backupItem(arktest.NewLogger(), obj, groupResource)
|
||||||
|
gotError := err != nil
|
||||||
|
if e, a := test.expectError, gotError; e != a {
|
||||||
|
t.Fatalf("error: expected %t, got %t", e, a)
|
||||||
|
}
|
||||||
|
if test.expectError {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.expectExcluded {
|
||||||
|
if len(w.headers) > 0 {
|
||||||
|
t.Errorf("unexpected header write")
|
||||||
|
}
|
||||||
|
if len(w.data) > 0 {
|
||||||
|
t.Errorf("unexpected data write")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// we have to delete status as that's what backupItem does,
|
||||||
|
// and this ensures that we're verifying the right data
|
||||||
|
delete(item, "status")
|
||||||
|
itemWithoutStatus, err := json.Marshal(&item)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(w.headers), "headers")
|
||||||
|
assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name")
|
||||||
|
assert.Equal(t, int64(len(itemWithoutStatus)), w.headers[0].Size, "header.size")
|
||||||
|
assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag")
|
||||||
|
assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode")
|
||||||
|
assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set")
|
||||||
|
assert.Equal(t, 1, len(w.data), "# of data")
|
||||||
|
|
||||||
|
actual, err := getAsMap(string(w.data[0]))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if e, a := item, actual; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("data: expected %s, got %s", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.customAction {
|
||||||
|
if len(action.ids) != 1 {
|
||||||
|
t.Errorf("unexpected custom action ids: %v", action.ids)
|
||||||
|
} else if e, a := test.expectedActionID, action.ids[0]; e != a {
|
||||||
|
t.Errorf("action.ids[0]: expected %s, got %s", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(action.backups) != 1 {
|
||||||
|
t.Errorf("unexpected custom action backups: %#v", action.backups)
|
||||||
|
} else if e, a := backup, action.backups[0]; e != a {
|
||||||
|
t.Errorf("action.backups[0]: expected %#v, got %#v", e, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type fakeTarWriter struct {
|
||||||
|
closeCalled bool
|
||||||
|
headers []*tar.Header
|
||||||
|
data [][]byte
|
||||||
|
writeHeaderError error
|
||||||
|
writeError error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fakeTarWriter) Close() error { return nil }
|
||||||
|
|
||||||
|
func (w *fakeTarWriter) Write(data []byte) (int, error) {
|
||||||
|
w.data = append(w.data, data)
|
||||||
|
return 0, w.writeError
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *fakeTarWriter) WriteHeader(header *tar.Header) error {
|
||||||
|
w.headers = append(w.headers, header)
|
||||||
|
return w.writeHeaderError
|
||||||
|
}
|
||||||
|
|
||||||
|
type mockItemBackupper struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ib *mockItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error {
|
||||||
|
args := ib.Called(logger, obj, groupResource)
|
||||||
|
return args.Error(0)
|
||||||
|
}
|
|
@@ -0,0 +1,186 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"encoding/json"
	"time"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// itemHookHandler invokes hooks for an item.
type itemHookHandler interface {
	// handleHooks invokes hooks for an item. If the item is a pod and the appropriate annotations exist
	// to specify a hook, that is executed. Otherwise, this looks at the backup context's Backup to
	// determine if there are any hooks relevant to the item, taking into account the hook spec's
	// namespaces, resources, and label selector.
	handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error
}

// defaultItemHookHandler is the default itemHookHandler.
type defaultItemHookHandler struct {
	podCommandExecutor podCommandExecutor
}

func (h *defaultItemHookHandler) handleHooks(
	log *logrus.Entry,
	groupResource schema.GroupResource,
	obj runtime.Unstructured,
	resourceHooks []resourceHook,
) error {
	// We only support hooks on pods right now
	if groupResource != podsGroupResource {
		return nil
	}

	metadata, err := meta.Accessor(obj)
	if err != nil {
		return errors.Wrap(err, "unable to get a metadata accessor")
	}

	namespace := metadata.GetNamespace()
	name := metadata.GetName()

	// If the pod has the hook specified via annotations, that takes priority.
	if hookFromAnnotations := getPodExecHookFromAnnotations(metadata.GetAnnotations()); hookFromAnnotations != nil {
		hookLog := log.WithFields(
			logrus.Fields{
				"hookSource": "annotation",
				"hookType":   "exec",
			},
		)
		if err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "<from-annotation>", hookFromAnnotations); err != nil {
			hookLog.WithError(err).Error("Error executing hook")
			if hookFromAnnotations.OnError == api.HookErrorModeFail {
				return err
			}
		}

		return nil
	}

	labels := labels.Set(metadata.GetLabels())
	// Otherwise, check for hooks defined in the backup spec.
	for _, resourceHook := range resourceHooks {
		if !resourceHook.applicableTo(groupResource, namespace, labels) {
			continue
		}

		for _, hook := range resourceHook.hooks {
			if groupResource == podsGroupResource {
				if hook.Exec != nil {
					hookLog := log.WithFields(
						logrus.Fields{
							"hookSource": "backupSpec",
							"hookType":   "exec",
						},
					)
					err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec)
					if err != nil {
						hookLog.WithError(err).Error("Error executing hook")
						if hook.Exec.OnError == api.HookErrorModeFail {
							return err
						}
					}
				}
			}
		}
	}

	return nil
}

const (
	podBackupHookContainerAnnotationKey = "hook.backup.ark.heptio.com/container"
	podBackupHookCommandAnnotationKey   = "hook.backup.ark.heptio.com/command"
	podBackupHookOnErrorAnnotationKey   = "hook.backup.ark.heptio.com/on-error"
	podBackupHookTimeoutAnnotationKey   = "hook.backup.ark.heptio.com/timeout"
	defaultHookOnError                  = api.HookErrorModeFail
	defaultHookTimeout                  = 30 * time.Second
)

// getPodExecHookFromAnnotations returns an ExecHook based on the annotations, as long as the
// 'command' annotation is present. If it is absent, this returns nil.
func getPodExecHookFromAnnotations(annotations map[string]string) *api.ExecHook {
	container := annotations[podBackupHookContainerAnnotationKey]

	commandValue, ok := annotations[podBackupHookCommandAnnotationKey]
	if !ok {
		return nil
	}
	var command []string
	// check for json array
	if commandValue[0] == '[' {
		if err := json.Unmarshal([]byte(commandValue), &command); err != nil {
			command = []string{commandValue}
		}
	} else {
		command = append(command, commandValue)
	}

	onError := api.HookErrorMode(annotations[podBackupHookOnErrorAnnotationKey])
	if onError != api.HookErrorModeContinue && onError != api.HookErrorModeFail {
		onError = ""
	}

	var timeout time.Duration
	timeoutString := annotations[podBackupHookTimeoutAnnotationKey]
	if timeoutString != "" {
		if temp, err := time.ParseDuration(timeoutString); err == nil {
			timeout = temp
		} else {
			// TODO: log error that we couldn't parse duration
		}
	}

	return &api.ExecHook{
		Container: container,
		Command:   command,
		OnError:   onError,
		Timeout:   metav1.Duration{Duration: timeout},
	}
}

type resourceHook struct {
	name          string
	namespaces    *collections.IncludesExcludes
	resources     *collections.IncludesExcludes
	labelSelector labels.Selector
	hooks         []api.BackupResourceHook
}

func (r resourceHook) applicableTo(groupResource schema.GroupResource, namespace string, labels labels.Set) bool {
	if r.namespaces != nil && !r.namespaces.ShouldInclude(namespace) {
		return false
	}
	if r.resources != nil && !r.resources.ShouldInclude(groupResource.String()) {
		return false
	}
	if r.labelSelector != nil && !r.labelSelector.Matches(labels) {
		return false
	}
	return true
}
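As a usage sketch of the annotation path (illustrative only: the function below is not part of this change, it would have to live in this package since the parser is unexported, and the pg_dump command is a made-up example), a pod annotated like this yields an exec hook that runs in the "db" container, continues on error, and times out after 90 seconds:

// examplePodExecHookFromAnnotations is an illustrative sketch, not part of this change.
func examplePodExecHookFromAnnotations() *api.ExecHook {
	annotations := map[string]string{
		"hook.backup.ark.heptio.com/container": "db",
		"hook.backup.ark.heptio.com/command":   `["/bin/sh", "-c", "pg_dump mydb > /backups/db.sql"]`,
		"hook.backup.ark.heptio.com/on-error":  "Continue",
		"hook.backup.ark.heptio.com/timeout":   "90s",
	}

	// Returns &api.ExecHook{Container: "db", Command: ["/bin/sh", "-c", "pg_dump ..."],
	// OnError: api.HookErrorModeContinue, Timeout: 90s}.
	return getPodExecHookFromAnnotations(annotations)
}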
@@ -0,0 +1,583 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 the Heptio Ark contributors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package backup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/heptio/ark/pkg/apis/ark/v1"
|
||||||
|
"github.com/heptio/ark/pkg/util/collections"
|
||||||
|
arktest "github.com/heptio/ark/pkg/util/test"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockItemHookHandler struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *mockItemHookHandler) handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error {
|
||||||
|
args := h.Called(log, groupResource, obj, resourceHooks)
|
||||||
|
return args.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHandleHooksSkips(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
groupResource string
|
||||||
|
item runtime.Unstructured
|
||||||
|
hooks []resourceHook
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "not a pod",
|
||||||
|
groupResource: "widget.group",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod without annotation / no spec hooks",
|
||||||
|
item: unstructuredOrDie(
|
||||||
|
`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "foo"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "spec hooks not applicable",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(
|
||||||
|
`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "foo",
|
||||||
|
"labels": {
|
||||||
|
"color": "blue"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
),
|
||||||
|
hooks: []resourceHook{
|
||||||
|
{
|
||||||
|
name: "ns exclude",
|
||||||
|
namespaces: collections.NewIncludesExcludes().Excludes("ns"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "resource exclude",
|
||||||
|
resources: collections.NewIncludesExcludes().Includes("widgets.group"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "label selector mismatch",
|
||||||
|
labelSelector: parseLabelSelectorOrDie("color=green"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing exec hook",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{},
|
||||||
|
{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
podCommandExecutor := &mockPodCommandExecutor{}
|
||||||
|
defer podCommandExecutor.AssertExpectations(t)
|
||||||
|
|
||||||
|
h := &defaultItemHookHandler{
|
||||||
|
podCommandExecutor: podCommandExecutor,
|
||||||
|
}
|
||||||
|
|
||||||
|
groupResource := schema.ParseGroupResource(test.groupResource)
|
||||||
|
err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHandleHooksPodFromPodAnnotation(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
groupResource string
|
||||||
|
item runtime.Unstructured
|
||||||
|
hooks []resourceHook
|
||||||
|
hookErrorsByContainer map[string]error
|
||||||
|
expectedError error
|
||||||
|
expectedPodHook *v1.ExecHook
|
||||||
|
expectedPodHookError error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "pod, no annotation, spec (multiple hooks) = run spec",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name"
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
hooks: []resourceHook{
|
||||||
|
{
|
||||||
|
name: "hook1",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "1a",
|
||||||
|
Command: []string{"1a"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "1b",
|
||||||
|
Command: []string{"1b"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook2",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "2a",
|
||||||
|
Command: []string{"2a"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "2b",
|
||||||
|
Command: []string{"2b"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod, annotation, no spec = run annotation",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name",
|
||||||
|
"annotations": {
|
||||||
|
"hook.backup.ark.heptio.com/container": "c",
|
||||||
|
"hook.backup.ark.heptio.com/command": "/bin/ls"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
expectedPodHook: &v1.ExecHook{
|
||||||
|
Container: "c",
|
||||||
|
Command: []string{"/bin/ls"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod, annotation & spec = run annotation",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name",
|
||||||
|
"annotations": {
|
||||||
|
"hook.backup.ark.heptio.com/container": "c",
|
||||||
|
"hook.backup.ark.heptio.com/command": "/bin/ls"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
expectedPodHook: &v1.ExecHook{
|
||||||
|
Container: "c",
|
||||||
|
Command: []string{"/bin/ls"},
|
||||||
|
},
|
||||||
|
hooks: []resourceHook{
|
||||||
|
{
|
||||||
|
name: "hook1",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "1a",
|
||||||
|
Command: []string{"1a"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod, annotation, onError=fail = return error",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name",
|
||||||
|
"annotations": {
|
||||||
|
"hook.backup.ark.heptio.com/container": "c",
|
||||||
|
"hook.backup.ark.heptio.com/command": "/bin/ls",
|
||||||
|
"hook.backup.ark.heptio.com/on-error": "Fail"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
expectedPodHook: &v1.ExecHook{
|
||||||
|
Container: "c",
|
||||||
|
Command: []string{"/bin/ls"},
|
||||||
|
OnError: v1.HookErrorModeFail,
|
||||||
|
},
|
||||||
|
expectedPodHookError: errors.New("pod hook error"),
|
||||||
|
expectedError: errors.New("pod hook error"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod, annotation, onError=continue = return nil",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name",
|
||||||
|
"annotations": {
|
||||||
|
"hook.backup.ark.heptio.com/container": "c",
|
||||||
|
"hook.backup.ark.heptio.com/command": "/bin/ls",
|
||||||
|
"hook.backup.ark.heptio.com/on-error": "Continue"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
expectedPodHook: &v1.ExecHook{
|
||||||
|
Container: "c",
|
||||||
|
Command: []string{"/bin/ls"},
|
||||||
|
OnError: v1.HookErrorModeContinue,
|
||||||
|
},
|
||||||
|
expectedPodHookError: errors.New("pod hook error"),
|
||||||
|
expectedError: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pod, spec, onError=fail = don't run other hooks",
|
||||||
|
groupResource: "pods",
|
||||||
|
item: unstructuredOrDie(`
|
||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "Pod",
|
||||||
|
"metadata": {
|
||||||
|
"namespace": "ns",
|
||||||
|
"name": "name"
|
||||||
|
}
|
||||||
|
}`),
|
||||||
|
hooks: []resourceHook{
|
||||||
|
{
|
||||||
|
name: "hook1",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "1a",
|
||||||
|
Command: []string{"1a"},
|
||||||
|
OnError: v1.HookErrorModeContinue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "1b",
|
||||||
|
Command: []string{"1b"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook2",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "2",
|
||||||
|
Command: []string{"2"},
|
||||||
|
OnError: v1.HookErrorModeFail,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook3",
|
||||||
|
hooks: []v1.BackupResourceHook{
|
||||||
|
{
|
||||||
|
Exec: &v1.ExecHook{
|
||||||
|
Container: "3",
|
||||||
|
Command: []string{"3"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
hookErrorsByContainer: map[string]error{
|
||||||
|
"1a": errors.New("1a error, but continue"),
|
||||||
|
"2": errors.New("2 error, fail"),
|
||||||
|
},
|
||||||
|
expectedError: errors.New("2 error, fail"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
podCommandExecutor := &mockPodCommandExecutor{}
|
||||||
|
defer podCommandExecutor.AssertExpectations(t)
|
||||||
|
|
||||||
|
h := &defaultItemHookHandler{
|
||||||
|
podCommandExecutor: podCommandExecutor,
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.expectedPodHook != nil {
|
||||||
|
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "<from-annotation>", test.expectedPodHook).Return(test.expectedPodHookError)
|
||||||
|
} else {
|
||||||
|
hookLoop:
|
||||||
|
for _, resourceHook := range test.hooks {
|
||||||
|
for _, hook := range resourceHook.hooks {
|
||||||
|
hookError := test.hookErrorsByContainer[hook.Exec.Container]
|
||||||
|
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
|
||||||
|
if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail {
|
||||||
|
break hookLoop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
groupResource := schema.ParseGroupResource(test.groupResource)
|
||||||
|
err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks)
|
||||||
|
|
||||||
|
if test.expectedError != nil {
|
||||||
|
assert.EqualError(t, err, test.expectedError.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetPodExecHookFromAnnotations(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
annotations map[string]string
|
||||||
|
expectedHook *v1.ExecHook
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "missing command annotation",
|
||||||
|
expectedHook: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "malformed command json array",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "[blarg",
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"[blarg"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid command json array",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: `["a","b","c"]`,
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"a", "b", "c"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "command as a string",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook mode set to continue",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
podBackupHookOnErrorAnnotationKey: string(v1.HookErrorModeContinue),
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
OnError: v1.HookErrorModeContinue,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook mode set to fail",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
podBackupHookOnErrorAnnotationKey: string(v1.HookErrorModeFail),
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
OnError: v1.HookErrorModeFail,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "use the specified timeout",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
podBackupHookTimeoutAnnotationKey: "5m3s",
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
Timeout: metav1.Duration{Duration: 5*time.Minute + 3*time.Second},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid timeout is ignored",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
podBackupHookTimeoutAnnotationKey: "invalid",
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "use the specified container",
|
||||||
|
annotations: map[string]string{
|
||||||
|
podBackupHookContainerAnnotationKey: "some-container",
|
||||||
|
podBackupHookCommandAnnotationKey: "/usr/bin/foo",
|
||||||
|
},
|
||||||
|
expectedHook: &v1.ExecHook{
|
||||||
|
Container: "some-container",
|
||||||
|
Command: []string{"/usr/bin/foo"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
hook := getPodExecHookFromAnnotations(test.annotations)
|
||||||
|
assert.Equal(t, test.expectedHook, hook)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResourceHookApplicableTo(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
includedNamespaces []string
|
||||||
|
excludedNamespaces []string
|
||||||
|
includedResources []string
|
||||||
|
excludedResources []string
|
||||||
|
labelSelector string
|
||||||
|
namespace string
|
||||||
|
resource schema.GroupResource
|
||||||
|
labels labels.Set
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "allow anything",
|
||||||
|
namespace: "foo",
|
||||||
|
resource: schema.GroupResource{Group: "foo", Resource: "bar"},
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "namespace in included list",
|
||||||
|
includedNamespaces: []string{"a", "b"},
|
||||||
|
excludedNamespaces: []string{"c", "d"},
|
||||||
|
namespace: "b",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "namespace not in included list",
|
||||||
|
includedNamespaces: []string{"a", "b"},
|
||||||
|
namespace: "c",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "namespace excluded",
|
||||||
|
excludedNamespaces: []string{"a", "b"},
|
||||||
|
namespace: "a",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "resource in included list",
|
||||||
|
includedResources: []string{"foo.a", "bar.b"},
|
||||||
|
excludedResources: []string{"baz.c"},
|
||||||
|
resource: schema.GroupResource{Group: "a", Resource: "foo"},
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "resource not in included list",
|
||||||
|
includedResources: []string{"foo.a", "bar.b"},
|
||||||
|
resource: schema.GroupResource{Group: "c", Resource: "baz"},
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "resource excluded",
|
||||||
|
excludedResources: []string{"foo.a", "bar.b"},
|
||||||
|
resource: schema.GroupResource{Group: "b", Resource: "bar"},
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "label selector matches",
|
||||||
|
labelSelector: "a=b",
|
||||||
|
labels: labels.Set{"a": "b"},
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "label selector doesn't match",
|
||||||
|
labelSelector: "a=b",
|
||||||
|
labels: labels.Set{"a": "c"},
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
h := resourceHook{
|
||||||
|
namespaces: collections.NewIncludesExcludes().Includes(test.includedNamespaces...).Excludes(test.excludedNamespaces...),
|
||||||
|
resources: collections.NewIncludesExcludes().Includes(test.includedResources...).Excludes(test.excludedResources...),
|
||||||
|
}
|
||||||
|
if test.labelSelector != "" {
|
||||||
|
selector, err := labels.Parse(test.labelSelector)
|
||||||
|
require.NoError(t, err)
|
||||||
|
h.labelSelector = selector
|
||||||
|
}
|
||||||
|
|
||||||
|
result := h.applicableTo(test.resource, test.namespace, test.labels)
|
||||||
|
assert.Equal(t, test.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,17 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup
@@ -0,0 +1,225 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"bytes"
	"net/url"
	"time"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
	kscheme "k8s.io/client-go/kubernetes/scheme"
	kapiv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// podCommandExecutor is capable of executing a command in a container in a pod.
type podCommandExecutor interface {
	// executePodCommand executes a command in a container in a pod. If the command takes longer than
	// the specified timeout, an error is returned.
	executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
}

type poster interface {
	Post() *rest.Request
}

type defaultPodCommandExecutor struct {
	restClientConfig *rest.Config
	restClient       poster

	streamExecutorFactory streamExecutorFactory
}

// NewPodCommandExecutor creates a new podCommandExecutor.
func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) podCommandExecutor {
	return &defaultPodCommandExecutor{
		restClientConfig: restClientConfig,
		restClient:       restClient,

		streamExecutorFactory: &defaultStreamExecutorFactory{},
	}
}

// executePodCommand uses the pod exec API to execute a command in a container in a pod. If the
// command takes longer than the specified timeout, an error is returned (NOTE: it is not currently
// possible to ensure the command is terminated when the timeout occurs, so it may continue to run
// in the background).
func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
	if item == nil {
		return errors.New("item is required")
	}
	if namespace == "" {
		return errors.New("namespace is required")
	}
	if name == "" {
		return errors.New("name is required")
	}
	if hookName == "" {
		return errors.New("hookName is required")
	}
	if hook == nil {
		return errors.New("hook is required")
	}

	if hook.Container == "" {
		if err := setDefaultHookContainer(item, hook); err != nil {
			return err
		}
	} else if err := ensureContainerExists(item, hook.Container); err != nil {
		return err
	}

	if len(hook.Command) == 0 {
		return errors.New("command is required")
	}

	switch hook.OnError {
	case api.HookErrorModeFail, api.HookErrorModeContinue:
		// use the specified value
	default:
		// default to fail
		hook.OnError = api.HookErrorModeFail
	}

	if hook.Timeout.Duration == 0 {
		hook.Timeout.Duration = defaultHookTimeout
	}

	hookLog := log.WithFields(
		logrus.Fields{
			"hookName":      hookName,
			"hookContainer": hook.Container,
			"hookCommand":   hook.Command,
			"hookOnError":   hook.OnError,
			"hookTimeout":   hook.Timeout,
		},
	)
	hookLog.Info("running exec hook")

	req := e.restClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(name).
		SubResource("exec")

	req.VersionedParams(&kapiv1.PodExecOptions{
		Container: hook.Container,
		Command:   hook.Command,
		Stdout:    true,
		Stderr:    true,
	}, kscheme.ParameterCodec)

	executor, err := e.streamExecutorFactory.NewExecutor(e.restClientConfig, "POST", req.URL())
	if err != nil {
		return err
	}

	var stdout, stderr bytes.Buffer

	streamOptions := remotecommand.StreamOptions{
		SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols,
		Stdout:             &stdout,
		Stderr:             &stderr,
	}

	errCh := make(chan error)

	go func() {
		err = executor.Stream(streamOptions)
		errCh <- err
	}()

	var timeoutCh <-chan time.Time
	if hook.Timeout.Duration > 0 {
		timer := time.NewTimer(hook.Timeout.Duration)
		defer timer.Stop()
		timeoutCh = timer.C
	}

	select {
	case err = <-errCh:
	case <-timeoutCh:
		return errors.Errorf("timed out after %v", hook.Timeout.Duration)
	}

	hookLog.Infof("stdout: %s", stdout.String())
	hookLog.Infof("stderr: %s", stderr.String())

	return err
}

func ensureContainerExists(pod map[string]interface{}, container string) error {
	containers, err := collections.GetSlice(pod, "spec.containers")
	if err != nil {
		return err
	}
	for _, obj := range containers {
		c, ok := obj.(map[string]interface{})
		if !ok {
			return errors.Errorf("unexpected type for container %T", obj)
		}
		name, ok := c["name"].(string)
		if !ok {
			return errors.Errorf("unexpected type for container name %T", c["name"])
		}
		if name == container {
			return nil
		}
	}

	return errors.Errorf("no such container: %q", container)
}

func setDefaultHookContainer(pod map[string]interface{}, hook *api.ExecHook) error {
	containers, err := collections.GetSlice(pod, "spec.containers")
	if err != nil {
		return err
	}

	if len(containers) < 1 {
		return errors.New("need at least 1 container")
	}

	container, ok := containers[0].(map[string]interface{})
	if !ok {
		return errors.Errorf("unexpected type for container %T", pod)
	}

	name, ok := container["name"].(string)
	if !ok {
		return errors.Errorf("unexpected type for container name %T", container["name"])
	}
	hook.Container = name

	return nil
}

type streamExecutorFactory interface {
	NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error)
}

type defaultStreamExecutorFactory struct{}

func (f *defaultStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) {
	return remotecommand.NewExecutor(config, method, url)
}
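For orientation, a minimal wiring sketch (not part of this change; it assumes an already-built *rest.Config, the kubernetes "k8s.io/client-go/kubernetes" clientset import, and placeholder namespace, pod, and container names). Any value with a Post() *rest.Request method satisfies poster, e.g. the core v1 REST client from client-go:

// exampleRunExecHook is an illustrative sketch only.
func exampleRunExecHook(cfg *rest.Config, log *logrus.Entry) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// CoreV1().RESTClient() exposes Post() *rest.Request, so it satisfies the poster interface.
	executor := NewPodCommandExecutor(cfg, clientset.CoreV1().RESTClient())

	// The item is the pod's unstructured content; only spec.containers is inspected here,
	// so a hand-rolled map (as in the tests above) is enough for a sketch.
	pod := map[string]interface{}{
		"spec": map[string]interface{}{
			"containers": []interface{}{
				map[string]interface{}{"name": "db"},
			},
		},
	}

	hook := &api.ExecHook{
		// Container defaults to the pod's first container, OnError defaults to Fail,
		// and Timeout defaults to 30s when left zero.
		Command: []string{"/bin/sh", "-c", "sync"},
	}

	return executor.executePodCommand(log, pod, "myns", "mypod", "example-hook", hook)
}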
@@ -0,0 +1,278 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 the Heptio Ark contributors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package backup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/heptio/ark/pkg/apis/ark/v1"
|
||||||
|
arktest "github.com/heptio/ark/pkg/util/test"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
|
"k8s.io/client-go/tools/remotecommand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewPodCommandExecutor(t *testing.T) {
|
||||||
|
restClientConfig := &rest.Config{Host: "foo"}
|
||||||
|
poster := &mockPoster{}
|
||||||
|
pce := NewPodCommandExecutor(restClientConfig, poster).(*defaultPodCommandExecutor)
|
||||||
|
assert.Equal(t, restClientConfig, pce.restClientConfig)
|
||||||
|
assert.Equal(t, poster, pce.restClient)
|
||||||
|
assert.Equal(t, &defaultStreamExecutorFactory{}, pce.streamExecutorFactory)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecutePodCommandMissingInputs(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
item map[string]interface{}
|
||||||
|
podNamespace string
|
||||||
|
podName string
|
||||||
|
hookName string
|
||||||
|
hook *v1.ExecHook
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "missing item",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing pod namespace",
|
||||||
|
item: map[string]interface{}{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing pod name",
|
||||||
|
item: map[string]interface{}{},
|
||||||
|
podNamespace: "ns",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing hookName",
|
||||||
|
item: map[string]interface{}{},
|
||||||
|
podNamespace: "ns",
|
||||||
|
podName: "pod",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing hook",
|
||||||
|
item: map[string]interface{}{},
|
||||||
|
podNamespace: "ns",
|
||||||
|
podName: "pod",
|
||||||
|
hookName: "hook",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "container not found",
|
||||||
|
item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
|
||||||
|
podNamespace: "ns",
|
||||||
|
podName: "pod",
|
||||||
|
hookName: "hook",
|
||||||
|
hook: &v1.ExecHook{
|
||||||
|
Container: "missing",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "command missing",
|
||||||
|
item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
|
||||||
|
podNamespace: "ns",
|
||||||
|
podName: "pod",
|
||||||
|
hookName: "hook",
|
||||||
|
hook: &v1.ExecHook{
|
||||||
|
Container: "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
e := &defaultPodCommandExecutor{}
|
||||||
|
err := e.executePodCommand(arktest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecutePodCommand(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
containerName string
|
||||||
|
expectedContainerName string
|
||||||
|
command []string
|
||||||
|
errorMode v1.HookErrorMode
|
||||||
|
expectedErrorMode v1.HookErrorMode
|
||||||
|
timeout time.Duration
|
||||||
|
expectedTimeout time.Duration
|
||||||
|
hookError error
|
||||||
|
expectedError string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "validate defaults",
|
||||||
|
command: []string{"some", "command"},
|
||||||
|
expectedContainerName: "foo",
|
||||||
|
expectedErrorMode: v1.HookErrorModeFail,
|
||||||
|
expectedTimeout: 30 * time.Second,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "use specified values",
|
||||||
|
command: []string{"some", "command"},
|
||||||
|
containerName: "bar",
|
||||||
|
expectedContainerName: "bar",
|
||||||
|
errorMode: v1.HookErrorModeContinue,
|
||||||
|
expectedErrorMode: v1.HookErrorModeContinue,
|
||||||
|
timeout: 10 * time.Second,
|
||||||
|
expectedTimeout: 10 * time.Second,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "hook error",
|
||||||
|
command: []string{"some", "command"},
|
||||||
|
expectedContainerName: "foo",
|
||||||
|
expectedErrorMode: v1.HookErrorModeFail,
|
||||||
|
expectedTimeout: 30 * time.Second,
|
||||||
|
hookError: errors.New("hook error"),
|
||||||
|
expectedError: "hook error",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			hook := v1.ExecHook{
				Container: test.containerName,
				Command: test.command,
				OnError: test.errorMode,
				Timeout: metav1.Duration{Duration: test.timeout},
			}

			pod, err := getAsMap(`
{
	"metadata": {
		"namespace": "namespace",
		"name": "name"
	},
	"spec": {
		"containers": [
			{"name": "foo"},
			{"name": "bar"}
		]
	}
}`)

			require.NoError(t, err)

			clientConfig := &rest.Config{}
			poster := &mockPoster{}
			defer poster.AssertExpectations(t)
			podCommandExecutor := NewPodCommandExecutor(clientConfig, poster).(*defaultPodCommandExecutor)

			streamExecutorFactory := &mockStreamExecutorFactory{}
			defer streamExecutorFactory.AssertExpectations(t)
			podCommandExecutor.streamExecutorFactory = streamExecutorFactory

			baseUrl, _ := url.Parse("https://some.server")
			contentConfig := rest.ContentConfig{
				GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
			}
			postRequest := rest.NewRequest(nil, "POST", baseUrl, "/api/v1", contentConfig, rest.Serializers{}, nil, nil)
			poster.On("Post").Return(postRequest)

			streamExecutor := &mockStreamExecutor{}
			defer streamExecutor.AssertExpectations(t)

			expectedCommand := strings.Join(test.command, "&command=")
			expectedURL, _ := url.Parse(
				fmt.Sprintf("https://some.server/api/v1/namespaces/namespace/pods/name/exec?command=%s&container=%s&stderr=true&stdout=true", expectedCommand, test.expectedContainerName),
			)
			streamExecutorFactory.On("NewExecutor", clientConfig, "POST", expectedURL).Return(streamExecutor, nil)

			var stdout, stderr bytes.Buffer
			expectedStreamOptions := remotecommand.StreamOptions{
				SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols,
				Stdout: &stdout,
				Stderr: &stderr,
			}
			streamExecutor.On("Stream", expectedStreamOptions).Return(test.hookError)

			err = podCommandExecutor.executePodCommand(arktest.NewLogger(), pod, "namespace", "name", "hookName", &hook)
			if test.expectedError != "" {
				assert.EqualError(t, err, test.expectedError)
				return
			}

			require.NoError(t, err)
		})
	}
}

func TestEnsureContainerExists(t *testing.T) {
	pod := map[string]interface{}{
		"spec": map[string]interface{}{
			"containers": []interface{}{
				map[string]interface{}{
					"name": "foo",
				},
			},
		},
	}

	err := ensureContainerExists(pod, "bar")
	assert.EqualError(t, err, `no such container: "bar"`)

	err = ensureContainerExists(pod, "foo")
	assert.NoError(t, err)
}

type mockStreamExecutorFactory struct {
	mock.Mock
}

func (f *mockStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) {
	args := f.Called(config, method, url)
	return args.Get(0).(remotecommand.StreamExecutor), args.Error(1)
}

type mockStreamExecutor struct {
	mock.Mock
	remotecommand.StreamExecutor
}

func (e *mockStreamExecutor) Stream(options remotecommand.StreamOptions) error {
	args := e.Called(options)
	return args.Error(0)
}

type mockPoster struct {
	mock.Mock
}

func (p *mockPoster) Post() *rest.Request {
	args := p.Called()
	return args.Get(0).(*rest.Request)
}

type mockPodCommandExecutor struct {
	mock.Mock
}

func (e *mockPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
	args := e.Called(log, item, namespace, name, hookName, hook)
	return args.Error(0)
}
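
The mocks above follow the standard testify pattern: a test registers the calls it expects with On(...), hands the mock to the code under test, and lets AssertExpectations verify the interaction. A minimal, hypothetical sketch of how mockPodCommandExecutor might be exercised (the hook value and expectations below are illustrative only and are not part of this commit):

func TestMockPodCommandExecutorUsage(t *testing.T) {
	executor := &mockPodCommandExecutor{}
	defer executor.AssertExpectations(t)

	// Expect exactly one hook execution and make it succeed.
	hook := &v1.ExecHook{Command: []string{"/bin/true"}}
	executor.On("executePodCommand", mock.Anything, mock.Anything, "ns", "pod", "hook", hook).Return(nil)

	// The code under test would normally receive the mock via the podCommandExecutor interface.
	err := executor.executePodCommand(arktest.NewLogger(), map[string]interface{}{}, "ns", "pod", "hook", hook)
	assert.NoError(t, err)
}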
@@ -0,0 +1,252 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"
)

type resourceBackupperFactory interface {
	newResourceBackupper(
		log *logrus.Entry,
		backup *api.Backup,
		namespaces *collections.IncludesExcludes,
		resources *collections.IncludesExcludes,
		labelSelector string,
		dynamicFactory client.DynamicFactory,
		discoveryHelper discovery.Helper,
		backedUpItems map[itemKey]struct{},
		cohabitatingResources map[string]*cohabitatingResource,
		actions map[schema.GroupResource]Action,
		podCommandExecutor podCommandExecutor,
		tarWriter tarWriter,
		resourceHooks []resourceHook,
	) resourceBackupper
}

type defaultResourceBackupperFactory struct{}

func (f *defaultResourceBackupperFactory) newResourceBackupper(
	log *logrus.Entry,
	backup *api.Backup,
	namespaces *collections.IncludesExcludes,
	resources *collections.IncludesExcludes,
	labelSelector string,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
) resourceBackupper {
	return &defaultResourceBackupper{
		log: log,
		backup: backup,
		namespaces: namespaces,
		resources: resources,
		labelSelector: labelSelector,
		dynamicFactory: dynamicFactory,
		discoveryHelper: discoveryHelper,
		backedUpItems: backedUpItems,
		actions: actions,
		cohabitatingResources: cohabitatingResources,
		podCommandExecutor: podCommandExecutor,
		tarWriter: tarWriter,
		resourceHooks: resourceHooks,

		itemBackupperFactory: &defaultItemBackupperFactory{},
	}
}

type resourceBackupper interface {
	backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error
}

type defaultResourceBackupper struct {
	log *logrus.Entry
	backup *api.Backup
	namespaces *collections.IncludesExcludes
	resources *collections.IncludesExcludes
	labelSelector string
	dynamicFactory client.DynamicFactory
	discoveryHelper discovery.Helper
	backedUpItems map[itemKey]struct{}
	cohabitatingResources map[string]*cohabitatingResource
	actions map[schema.GroupResource]Action
	podCommandExecutor podCommandExecutor
	tarWriter tarWriter
	resourceHooks []resourceHook

	itemBackupperFactory itemBackupperFactory
}

// backupResource backs up all the objects for a given group-version-resource.
func (rb *defaultResourceBackupper) backupResource(
	group *metav1.APIResourceList,
	resource metav1.APIResource,
) error {
	var errs []error

	gv, err := schema.ParseGroupVersion(group.GroupVersion)
	if err != nil {
		return errors.Wrapf(err, "error parsing GroupVersion %s", group.GroupVersion)
	}
	gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name}
	grString := gr.String()

	log := rb.log.WithField("groupResource", grString)

	switch {
	case rb.backup.Spec.IncludeClusterResources == nil:
		// when IncludeClusterResources == nil (auto), only directly
		// back up cluster-scoped resources if we're doing a full-cluster
		// (all namespaces) backup. Note that in the case of a subset of
		// namespaces being backed up, some related cluster-scoped resources
		// may still be backed up if triggered by a custom action (e.g. PVC->PV).
		if !resource.Namespaced && !rb.namespaces.IncludeEverything() {
			log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
			return nil
		}
	case *rb.backup.Spec.IncludeClusterResources == false:
		if !resource.Namespaced {
			log.Info("Skipping resource because it's cluster-scoped")
			return nil
		}
	case *rb.backup.Spec.IncludeClusterResources == true:
		// include the resource, no action required
	}

	if !rb.resources.ShouldInclude(grString) {
		log.Infof("Resource is excluded")
		return nil
	}

	if cohabitator, found := rb.cohabitatingResources[resource.Name]; found {
		if cohabitator.seen {
			log.WithFields(
				logrus.Fields{
					"cohabitatingResource1": cohabitator.groupResource1.String(),
					"cohabitatingResource2": cohabitator.groupResource2.String(),
				},
			).Infof("Skipping resource because it cohabitates and we've already processed it")
			return nil
		}
		cohabitator.seen = true
	}

	itemBackupper := rb.itemBackupperFactory.newItemBackupper(
		rb.backup,
		rb.namespaces,
		rb.resources,
		rb.backedUpItems,
		rb.actions,
		rb.podCommandExecutor,
		rb.tarWriter,
		rb.resourceHooks,
		rb.dynamicFactory,
		rb.discoveryHelper,
	)

	var namespacesToList []string
	if resource.Namespaced {
		namespacesToList = getNamespacesToList(rb.namespaces)
	} else {
		namespacesToList = []string{""}
	}
	for _, namespace := range namespacesToList {
		resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
		if err != nil {
			return err
		}

		unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: rb.labelSelector})
		if err != nil {
			return errors.WithStack(err)
		}

		// do the backup
		items, err := meta.ExtractList(unstructuredList)
		if err != nil {
			return errors.WithStack(err)
		}

		for _, item := range items {
			unstructured, ok := item.(runtime.Unstructured)
			if !ok {
				errs = append(errs, errors.Errorf("unexpected type %T", item))
				continue
			}

			if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
				errs = append(errs, err)
			}
		}
	}

	return kuberrs.NewAggregate(errs)
}

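To make the IncludeClusterResources switch above concrete, the decision for a cluster-scoped (non-namespaced) resource can be summarized as a small truth table; the helper below is a sketch for illustration only and is not part of the commit:

// clusterScopedIncluded mirrors the switch in backupResource for a non-namespaced resource.
func clusterScopedIncluded(includeClusterResources *bool, backingUpAllNamespaces bool) bool {
	switch {
	case includeClusterResources == nil:
		// "auto": only back up cluster-scoped resources directly for full-cluster backups;
		// they may still be pulled in later by a custom action (e.g. PVC->PV).
		return backingUpAllNamespaces
	case *includeClusterResources:
		return true
	default:
		return false
	}
}
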
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
func getNamespacesToList(ie *collections.IncludesExcludes) []string {
	if ie == nil {
		return []string{""}
	}

	if ie.ShouldInclude("*") {
		// "" means all namespaces
		return []string{""}
	}

	var list []string
	for _, i := range ie.GetIncludes() {
		if ie.ShouldInclude(i) {
			list = append(list, i)
		}
	}

	return list
}

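A brief illustration of the include/exclude resolution described in the comment above; the wrapper function is hypothetical and only annotates the expected results:

func namespacesToListExamples() {
	// Explicit includes minus excludes: only "a" and "c" are listed individually.
	ie := collections.NewIncludesExcludes().Includes("a", "b", "c").Excludes("b")
	_ = getNamespacesToList(ie) // []string{"a", "c"}

	// A wildcard include (or a nil IncludesExcludes) collapses to a single "" entry,
	// which the dynamic client treats as "list across all namespaces".
	all := collections.NewIncludesExcludes().Includes("*")
	_ = getNamespacesToList(all) // []string{""}
}
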
type cohabitatingResource struct {
	resource string
	groupResource1 schema.GroupResource
	groupResource2 schema.GroupResource
	seen bool
}

func newCohabitatingResource(resource, group1, group2 string) *cohabitatingResource {
	return &cohabitatingResource{
		resource: resource,
		groupResource1: schema.GroupResource{Group: group1, Resource: resource},
		groupResource2: schema.GroupResource{Group: group2, Resource: resource},
		seen: false,
	}
}
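
For context, deployments and networkpolicies are each served by two API groups, and the seen flag is what keeps them from being written to the backup twice. A hypothetical walk-through (not part of the commit):

func cohabitationExample() {
	deployments := newCohabitatingResource("deployments", "extensions", "apps")

	// First group encountered (say extensions/v1beta1): not yet seen, so backupResource
	// marks it and backs the deployments up.
	if !deployments.seen {
		deployments.seen = true
	}

	// Second group (apps/v1beta1): seen is already true, so backupResource skips it.
}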
@@ -0,0 +1,744 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"testing"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	arktest "github.com/heptio/ark/pkg/util/test"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestBackupResource(t *testing.T) {
	var (
		trueVal      = true
		falseVal     = false
		truePointer  = &trueVal
		falsePointer = &falseVal
	)

	tests := []struct {
		name                     string
		namespaces               *collections.IncludesExcludes
		resources                *collections.IncludesExcludes
		expectSkip               bool
		expectedListedNamespaces []string
		apiGroup                 *metav1.APIResourceList
		apiResource              metav1.APIResource
		groupVersion             schema.GroupVersion
		groupResource            schema.GroupResource
		listResponses            [][]*unstructured.Unstructured
		includeClusterResources  *bool
	}{
		{
			name: "resource not included",
			apiGroup: v1Group,
			apiResource: podsResource,
			resources: collections.NewIncludesExcludes().Excludes("pods"),
			expectSkip: true,
		},
		{
			name: "list all namespaces",
			namespaces: collections.NewIncludesExcludes(),
			resources: collections.NewIncludesExcludes(),
			expectedListedNamespaces: []string{""},
			apiGroup: v1Group,
			apiResource: podsResource,
			groupVersion: schema.GroupVersion{Group: "", Version: "v1"},
			groupResource: schema.GroupResource{Group: "", Resource: "pods"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`),
				},
			},
		},
		{
			name: "list selected namespaces",
			namespaces: collections.NewIncludesExcludes().Includes("a", "b"),
			resources: collections.NewIncludesExcludes(),
			expectedListedNamespaces: []string{"a", "b"},
			apiGroup: v1Group,
			apiResource: podsResource,
			groupVersion: schema.GroupVersion{Group: "", Version: "v1"},
			groupResource: schema.GroupResource{Group: "", Resource: "pods"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`),
				},
				{
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`),
					unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`),
				},
			},
		},
		{
			name: "list all namespaces - cluster scoped",
			namespaces: collections.NewIncludesExcludes(),
			resources: collections.NewIncludesExcludes(),
			expectedListedNamespaces: []string{""},
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
				},
			},
		},
		{
			name: "should include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=true",
			namespaces: collections.NewIncludesExcludes().Includes("ns-1"),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: truePointer,
			expectedListedNamespaces: []string{""},
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
				},
			},
		},
		{
			name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=false",
			namespaces: collections.NewIncludesExcludes().Includes("ns-1"),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: falsePointer,
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			expectSkip: true,
		},
		{
			name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=nil",
			namespaces: collections.NewIncludesExcludes().Includes("ns-1"),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: nil,
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			expectSkip: true,
		},
		{
			name: "should include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=true",
			namespaces: collections.NewIncludesExcludes(),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: truePointer,
			expectedListedNamespaces: []string{""},
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
				},
			},
		},
		{
			name: "should not include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=false",
			namespaces: collections.NewIncludesExcludes(),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: falsePointer,
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			expectSkip: true,
		},
		{
			name: "should include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=nil",
			namespaces: collections.NewIncludesExcludes(),
			resources: collections.NewIncludesExcludes(),
			includeClusterResources: nil,
			expectedListedNamespaces: []string{""},
			apiGroup: certificatesGroup,
			apiResource: certificateSigningRequestsResource,
			groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"},
			groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
			listResponses: [][]*unstructured.Unstructured{
				{
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
					unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
				},
			},
		},
	}

	for _, test := range tests {
		backup := &v1.Backup{
			Spec: v1.BackupSpec{
				IncludeClusterResources: test.includeClusterResources,
			},
		}

		labelSelector := "foo=bar"

		dynamicFactory := &arktest.FakeDynamicFactory{}
		defer dynamicFactory.AssertExpectations(t)

		discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

		backedUpItems := map[itemKey]struct{}{
			{resource: "foo", namespace: "ns", name: "name"}: struct{}{},
		}

		cohabitatingResources := map[string]*cohabitatingResource{
			"deployments":     newCohabitatingResource("deployments", "extensions", "apps"),
			"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
		}

		actions := map[schema.GroupResource]Action{
			{Group: "", Resource: "pods"}: &fakeAction{},
		}

		resourceHooks := []resourceHook{
			{name: "myhook"},
		}

		podCommandExecutor := &mockPodCommandExecutor{}
		defer podCommandExecutor.AssertExpectations(t)

		tarWriter := &fakeTarWriter{}

		t.Run(test.name, func(t *testing.T) {
			rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
				arktest.NewLogger(),
				backup,
				test.namespaces,
				test.resources,
				labelSelector,
				dynamicFactory,
				discoveryHelper,
				backedUpItems,
				cohabitatingResources,
				actions,
				podCommandExecutor,
				tarWriter,
				resourceHooks,
			).(*defaultResourceBackupper)

			itemBackupperFactory := &mockItemBackupperFactory{}
			defer itemBackupperFactory.AssertExpectations(t)
			rb.itemBackupperFactory = itemBackupperFactory

			if !test.expectSkip {
				itemBackupper := &mockItemBackupper{}
				defer itemBackupper.AssertExpectations(t)

				itemBackupperFactory.On("newItemBackupper",
					backup,
					test.namespaces,
					test.resources,
					backedUpItems,
					actions,
					podCommandExecutor,
					tarWriter,
					resourceHooks,
					dynamicFactory,
					discoveryHelper,
				).Return(itemBackupper)

				for i, namespace := range test.expectedListedNamespaces {
					client := &arktest.FakeDynamicClient{}
					defer client.AssertExpectations(t)

					dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil)

					list := &unstructured.UnstructuredList{
						Items: []unstructured.Unstructured{},
					}
					for _, item := range test.listResponses[i] {
						list.Items = append(list.Items, *item)
						itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
					}
					client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)

				}
			}
			err := rb.backupResource(test.apiGroup, test.apiResource)
			require.NoError(t, err)
		})
	}
}

func TestBackupResourceCohabitation(t *testing.T) {
	tests := []struct {
		name          string
		apiResource   metav1.APIResource
		apiGroup1     *metav1.APIResourceList
		groupVersion1 schema.GroupVersion
		apiGroup2     *metav1.APIResourceList
		groupVersion2 schema.GroupVersion
	}{
		{
			name: "deployments - extensions first",
			apiResource: deploymentsResource,
			apiGroup1: extensionsGroup,
			groupVersion1: extensionsGroupVersion,
			apiGroup2: appsGroup,
			groupVersion2: appsGroupVersion,
		},
		{
			name: "deployments - apps first",
			apiResource: deploymentsResource,
			apiGroup1: appsGroup,
			groupVersion1: appsGroupVersion,
			apiGroup2: extensionsGroup,
			groupVersion2: extensionsGroupVersion,
		},
		{
			name: "networkpolicies - extensions first",
			apiResource: networkPoliciesResource,
			apiGroup1: extensionsGroup,
			groupVersion1: extensionsGroupVersion,
			apiGroup2: networkingGroup,
			groupVersion2: networkingGroupVersion,
		},
		{
			name: "networkpolicies - networking first",
			apiResource: networkPoliciesResource,
			apiGroup1: networkingGroup,
			groupVersion1: networkingGroupVersion,
			apiGroup2: extensionsGroup,
			groupVersion2: extensionsGroupVersion,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			backup := &v1.Backup{}

			namespaces := collections.NewIncludesExcludes().Includes("*")
			resources := collections.NewIncludesExcludes().Includes("*")

			labelSelector := "foo=bar"

			dynamicFactory := &arktest.FakeDynamicFactory{}
			defer dynamicFactory.AssertExpectations(t)

			discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

			backedUpItems := map[itemKey]struct{}{
				{resource: "foo", namespace: "ns", name: "name"}: struct{}{},
			}

			cohabitatingResources := map[string]*cohabitatingResource{
				"deployments":     newCohabitatingResource("deployments", "extensions", "apps"),
				"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
			}

			actions := map[schema.GroupResource]Action{
				{Group: "", Resource: "pods"}: &fakeAction{},
			}

			resourceHooks := []resourceHook{
				{name: "myhook"},
			}

			podCommandExecutor := &mockPodCommandExecutor{}
			defer podCommandExecutor.AssertExpectations(t)

			tarWriter := &fakeTarWriter{}

			rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
				arktest.NewLogger(),
				backup,
				namespaces,
				resources,
				labelSelector,
				dynamicFactory,
				discoveryHelper,
				backedUpItems,
				cohabitatingResources,
				actions,
				podCommandExecutor,
				tarWriter,
				resourceHooks,
			).(*defaultResourceBackupper)

			itemBackupperFactory := &mockItemBackupperFactory{}
			defer itemBackupperFactory.AssertExpectations(t)
			rb.itemBackupperFactory = itemBackupperFactory

			itemBackupper := &mockItemBackupper{}
			defer itemBackupper.AssertExpectations(t)

			itemBackupperFactory.On("newItemBackupper",
				backup,
				namespaces,
				resources,
				backedUpItems,
				actions,
				podCommandExecutor,
				tarWriter,
				resourceHooks,
				dynamicFactory,
				discoveryHelper,
			).Return(itemBackupper)

			client := &arktest.FakeDynamicClient{}
			defer client.AssertExpectations(t)

			// STEP 1: make sure the initial backup goes through
			dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion1, test.apiResource, "").Return(client, nil)
			client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(&unstructured.UnstructuredList{}, nil)

			// STEP 2: do the backup
			err := rb.backupResource(test.apiGroup1, test.apiResource)
			require.NoError(t, err)

			// STEP 3: try to back up the cohabitating resource
			err = rb.backupResource(test.apiGroup2, test.apiResource)
			require.NoError(t, err)
		})
	}
}

type mockItemBackupperFactory struct {
	mock.Mock
}

func (ibf *mockItemBackupperFactory) newItemBackupper(
	backup *v1.Backup,
	namespaces, resources *collections.IncludesExcludes,
	backedUpItems map[itemKey]struct{},
	actions map[schema.GroupResource]Action,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
	dynamicFactory client.DynamicFactory,
	discoveryHelper discovery.Helper,
) ItemBackupper {
	args := ibf.Called(
		backup,
		namespaces,
		resources,
		backedUpItems,
		actions,
		podCommandExecutor,
		tarWriter,
		resourceHooks,
		dynamicFactory,
		discoveryHelper,
	)
	return args.Get(0).(ItemBackupper)
}

/*
func TestBackupResource2(t *testing.T) {
	tests := []struct {
		name                            string
		resourceIncludesExcludes        *collections.IncludesExcludes
		resourceGroup                   string
		resourceVersion                 string
		resourceGV                      string
		resourceName                    string
		resourceNamespaced              bool
		namespaceIncludesExcludes       *collections.IncludesExcludes
		expectedListedNamespaces        []string
		lists                           []string
		labelSelector                   string
		actions                         map[string]Action
		expectedActionIDs               map[string][]string
		deploymentsBackedUp             bool
		expectedDeploymentsBackedUp     bool
		networkPoliciesBackedUp         bool
		expectedNetworkPoliciesBackedUp bool
	}{
		{
			name: "should not include resource",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
			resourceGV: "v1",
			resourceName: "secrets",
			resourceNamespaced: true,
		},
		{
			name: "should skip deployments.extensions if we've seen deployments.apps",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGV: "extensions/v1beta1",
			resourceName: "deployments",
			resourceNamespaced: true,
			deploymentsBackedUp: true,
			expectedDeploymentsBackedUp: true,
		},
		{
			name: "should skip deployments.apps if we've seen deployments.extensions",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGV: "apps/v1beta1",
			resourceName: "deployments",
			resourceNamespaced: true,
			deploymentsBackedUp: true,
			expectedDeploymentsBackedUp: true,
		},
		{
			name: "should skip networkpolicies.extensions if we've seen networkpolicies.networking.k8s.io",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGV: "extensions/v1beta1",
			resourceName: "networkpolicies",
			resourceNamespaced: true,
			networkPoliciesBackedUp: true,
			expectedNetworkPoliciesBackedUp: true,
		},
		{
			name: "should skip networkpolicies.networking.k8s.io if we've seen networkpolicies.extensions",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGV: "networking.k8s.io/v1",
			resourceName: "networkpolicies",
			resourceNamespaced: true,
			networkPoliciesBackedUp: true,
			expectedNetworkPoliciesBackedUp: true,
		},
		{
			name: "list per namespace when not including *",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGroup: "apps",
			resourceVersion: "v1beta1",
			resourceGV: "apps/v1beta1",
			resourceName: "deployments",
			resourceNamespaced: true,
			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a", "b"),
			expectedListedNamespaces: []string{"a", "b"},
			lists: []string{
				`{
					"apiVersion": "apps/v1beta1",
					"kind": "DeploymentList",
					"items": [
						{
							"metadata": {
								"namespace": "a",
								"name": "1"
							}
						}
					]
				}`,
				`{
					"apiVersion": "apps/v1beta1v1",
					"kind": "DeploymentList",
					"items": [
						{
							"metadata": {
								"namespace": "b",
								"name": "2"
							}
						}
					]
				}`,
			},
			expectedDeploymentsBackedUp: true,
		},
		{
			name: "list all namespaces when including *",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGroup: "networking.k8s.io",
			resourceVersion: "v1",
			resourceGV: "networking.k8s.io/v1",
			resourceName: "networkpolicies",
			resourceNamespaced: true,
			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			expectedListedNamespaces: []string{""},
			lists: []string{
				`{
					"apiVersion": "networking.k8s.io/v1",
					"kind": "NetworkPolicyList",
					"items": [
						{
							"metadata": {
								"namespace": "a",
								"name": "1"
							}
						}
					]
				}`,
			},
			expectedNetworkPoliciesBackedUp: true,
		},
		{
			name: "list all namespaces when cluster-scoped, even with namespace includes",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGroup: "certificates.k8s.io",
			resourceVersion: "v1beta1",
			resourceGV: "certificates.k8s.io/v1beta1",
			resourceName: "certificatesigningrequests",
			resourceNamespaced: false,
			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
			expectedListedNamespaces: []string{""},
			labelSelector: "a=b",
			lists: []string{
				`{
					"apiVersion": "certifiaces.k8s.io/v1beta1",
					"kind": "CertificateSigningRequestList",
					"items": [
						{
							"metadata": {
								"name": "1",
								"labels": {
									"a": "b"
								}
							}
						}
					]
				}`,
			},
		},
		{
			name: "use a custom action",
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
			resourceGroup: "certificates.k8s.io",
			resourceVersion: "v1beta1",
			resourceGV: "certificates.k8s.io/v1beta1",
			resourceName: "certificatesigningrequests",
			resourceNamespaced: false,
			namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
			expectedListedNamespaces: []string{""},
			labelSelector: "a=b",
			lists: []string{
				`{
					"apiVersion": "certificates.k8s.io/v1beta1",
					"kind": "CertificateSigningRequestList",
					"items": [
						{
							"metadata": {
								"name": "1",
								"labels": {
									"a": "b"
								}
							}
						}
					]
				}`,
			},
			actions: map[string]Action{
				"certificatesigningrequests": &fakeAction{},
				"other": &fakeAction{},
			},
			expectedActionIDs: map[string][]string{
				"certificatesigningrequests": {"1"},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var labelSelector *metav1.LabelSelector
			if test.labelSelector != "" {
				s, err := metav1.ParseToLabelSelector(test.labelSelector)
				require.NoError(t, err)
				labelSelector = s
			}

			log, _ := testlogger.NewNullLogger()

			ctx := &backupContext{
				backup: &v1.Backup{
					Spec: v1.BackupSpec{
						LabelSelector: labelSelector,
					},
				},
				resourceIncludesExcludes: test.resourceIncludesExcludes,
				namespaceIncludesExcludes: test.namespaceIncludesExcludes,
				deploymentsBackedUp: test.deploymentsBackedUp,
				networkPoliciesBackedUp: test.networkPoliciesBackedUp,
				logger: log,
			}

			group := &metav1.APIResourceList{
				GroupVersion: test.resourceGV,
			}

			resource := metav1.APIResource{Name: test.resourceName, Namespaced: test.resourceNamespaced}

			itemBackupper := &mockItemBackupper{}

			var actualActionIDs map[string][]string

			dynamicFactory := &arktest.FakeDynamicFactory{}
			gvr := schema.GroupVersionResource{Group: test.resourceGroup, Version: test.resourceVersion}
			gr := schema.GroupResource{Group: test.resourceGroup, Resource: test.resourceName}
			for i, namespace := range test.expectedListedNamespaces {
				obj := toRuntimeObject(t, test.lists[i])

				client := &arktest.FakeDynamicClient{}
				client.On("List", metav1.ListOptions{LabelSelector: test.labelSelector}).Return(obj, nil)
				dynamicFactory.On("ClientForGroupVersionResource", gvr, resource, namespace).Return(client, nil)

				action := test.actions[test.resourceName]

				list, err := meta.ExtractList(obj)
				require.NoError(t, err)
				for i := range list {
					item := list[i].(*unstructured.Unstructured)
					itemBackupper.On("backupItem", ctx, item, gr).Return(nil)
					if action != nil {
						a, err := meta.Accessor(item)
						require.NoError(t, err)
						ns := a.GetNamespace()
						name := a.GetName()
						id := ns
						if id != "" {
							id += "/"
						}
						id += name
						if actualActionIDs == nil {
							actualActionIDs = make(map[string][]string)
						}
						actualActionIDs[test.resourceName] = append(actualActionIDs[test.resourceName], id)
					}
				}
			}

			resources := map[schema.GroupVersionResource]schema.GroupVersionResource{
				schema.GroupVersionResource{Resource: "certificatesigningrequests"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
				schema.GroupVersionResource{Resource: "other"}: schema.GroupVersionResource{Group: "somegroup", Version: "someversion", Resource: "otherthings"},
			}
			discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)

			podCommandExecutor := &arktest.PodCommandExecutor{}
			defer podCommandExecutor.AssertExpectations(t)

			kb, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, test.actions, podCommandExecutor)
			require.NoError(t, err)
			backupper := kb.(*kubernetesBackupper)
			backupper.itemBackupper = itemBackupper

			err = backupper.backupResource(ctx, group, resource)

			assert.Equal(t, test.expectedDeploymentsBackedUp, ctx.deploymentsBackedUp)
			assert.Equal(t, test.expectedNetworkPoliciesBackedUp, ctx.networkPoliciesBackedUp)
			assert.Equal(t, test.expectedActionIDs, actualActionIDs)
		})
	}
}
*/
@@ -18,12 +18,14 @@ package backup

 import (
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"

+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/clock"

 	api "github.com/heptio/ark/pkg/apis/ark/v1"
 	"github.com/heptio/ark/pkg/cloudprovider"
-	"github.com/heptio/ark/pkg/util/collections"
 	kubeutil "github.com/heptio/ark/pkg/util/kube"
 )

@@ -38,8 +40,6 @@ type volumeSnapshotAction struct {
 	clock clock.Clock
 }

-var _ Action = &volumeSnapshotAction{}
-
 func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Action, error) {
 	if snapshotService == nil {
 		return nil, errors.New("snapshotService cannot be nil")

@@ -54,53 +54,56 @@ func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Act
 // Execute triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
 // backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
 // disk type and IOPS (if applicable) to be able to restore to current state later.
-func (a *volumeSnapshotAction) Execute(ctx *backupContext, volume map[string]interface{}, backupper itemBackupper) error {
-	var (
-		backup     = ctx.backup
-		backupName = kubeutil.NamespaceAndName(backup)
-	)
+func (a *volumeSnapshotAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) {
+	var noAdditionalItems []ResourceIdentifier
+
+	log.Info("Executing volumeSnapshotAction")
+
 	if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes {
-		ctx.infof("Backup %q has volume snapshots disabled; skipping volume snapshot action.", backupName)
-		return nil
+		log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
+		return noAdditionalItems, nil
 	}

-	metadata := volume["metadata"].(map[string]interface{})
-	name := metadata["name"].(string)
+	metadata, err := meta.Accessor(item)
+	if err != nil {
+		return noAdditionalItems, errors.WithStack(err)
+	}
+
+	name := metadata.GetName()
 	var pvFailureDomainZone string
+	labels := metadata.GetLabels()

-	if labelsMap, err := collections.GetMap(metadata, "labels"); err != nil {
-		ctx.infof("error getting labels on PersistentVolume %q for backup %q: %v", name, backupName, err)
+	if labels[zoneLabel] != "" {
+		pvFailureDomainZone = labels[zoneLabel]
 	} else {
-		if labelsMap[zoneLabel] != nil {
-			pvFailureDomainZone = labelsMap[zoneLabel].(string)
-		} else {
-			ctx.infof("label %q is not present on PersistentVolume %q for backup %q.", zoneLabel, name, backupName)
-		}
+		log.Infof("label %q is not present on PersistentVolume", zoneLabel)
 	}

-	volumeID, err := kubeutil.GetVolumeID(volume)
+	volumeID, err := kubeutil.GetVolumeID(item.UnstructuredContent())
 	// non-nil error means it's a supported PV source but volume ID can't be found
 	if err != nil {
-		return errors.Wrapf(err, "error getting volume ID for backup %q, PersistentVolume %q", backupName, name)
+		return noAdditionalItems, errors.Wrapf(err, "error getting volume ID for PersistentVolume")
 	}
 	// no volumeID / nil error means unsupported PV source
 	if volumeID == "" {
-		ctx.infof("Backup %q: PersistentVolume %q is not a supported volume type for snapshots, skipping.", backupName, name)
-		return nil
+		log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.")
+		return noAdditionalItems, nil
 	}

-	ctx.infof("Backup %q: snapshotting PersistentVolume %q, volume-id %q", backupName, name, volumeID)
+	log = log.WithField("volumeID", volumeID)
+
+	log.Info("Snapshotting PersistentVolume")
 	snapshotID, err := a.snapshotService.CreateSnapshot(volumeID, pvFailureDomainZone)
 	if err != nil {
-		ctx.infof("error creating snapshot for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err)
-		return err
+		// log+error on purpose - log goes to the per-backup log file, error goes to the backup
+		log.WithError(err).Error("error creating snapshot")
+		return noAdditionalItems, errors.WithMessage(err, "error creating snapshot")
 	}

 	volumeType, iops, err := a.snapshotService.GetVolumeInfo(volumeID, pvFailureDomainZone)
 	if err != nil {
-		ctx.infof("error getting volume info for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err)
-		return err
+		log.WithError(err).Error("error getting volume info")
+		return noAdditionalItems, errors.WithMessage(err, "error getting volume info")
 	}

 	if backup.Status.VolumeBackups == nil {

@@ -114,5 +117,5 @@ func (a *volumeSnapshotAction) Execute(ctx *backupContext, volume map[string]int
 		AvailabilityZone: pvFailureDomainZone,
 	}

-	return nil
+	return noAdditionalItems, nil
 }

@@ -21,14 +21,15 @@ import (
 	"testing"
 	"time"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/clock"

 	"github.com/heptio/ark/pkg/apis/ark/v1"
-	. "github.com/heptio/ark/pkg/util/test"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 func TestVolumeSnapshotAction(t *testing.T) {

@@ -185,7 +186,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
 			},
 		}

-		snapshotService := &FakeSnapshotService{SnapshottableVolumes: test.volumeInfo}
+		snapshotService := &arktest.FakeSnapshotService{SnapshottableVolumes: test.volumeInfo}

 		vsa, _ := NewVolumeSnapshotAction(snapshotService)
 		action := vsa.(*volumeSnapshotAction)

@@ -198,15 +199,9 @@ func TestVolumeSnapshotAction(t *testing.T) {
 			t.Fatal(err)
 		}

-		log, _ := testlogger.NewNullLogger()
-
-		ctx := &backupContext{
-			backup: backup,
-			logger: log,
-		}
-
 		// method under test
-		err = action.Execute(ctx, pv, nil)
+		additionalItems, err := action.Execute(arktest.NewLogger(), &unstructured.Unstructured{Object: pv}, backup)
+		assert.Len(t, additionalItems, 0)

 		gotErr := err != nil

@ -30,12 +30,9 @@ import (
|
||||||
// DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and
|
// DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and
|
||||||
// GroupVersionKinds.
|
// GroupVersionKinds.
|
||||||
type DynamicFactory interface {
|
type DynamicFactory interface {
|
||||||
// ClientForGroupVersionResource returns a Dynamic client for the given Group and Version
|
 	// ClientForGroupVersionResource returns a Dynamic client for the given group/version
-	// (specified in gvr) and Resource (specified in resource) for the given namespace.
-	ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error)
-	// ClientForGroupVersionKind returns a Dynamic client for the given Group and Version
-	// (specified in gvk) and Resource (specified in resource) for the given namespace.
-	ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error)
+	// and resource for the given namespace.
+	ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error)
 }

 // dynamicFactory implements DynamicFactory.
@@ -43,17 +40,17 @@ type dynamicFactory struct {
 	clientPool dynamic.ClientPool
 }

 var _ DynamicFactory = &dynamicFactory{}

 // NewDynamicFactory returns a new ClientPool-based dynamic factory.
 func NewDynamicFactory(clientPool dynamic.ClientPool) DynamicFactory {
 	return &dynamicFactory{clientPool: clientPool}
 }

-func (f *dynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error) {
-	dynamicClient, err := f.clientPool.ClientForGroupVersionResource(gvr)
+func (f *dynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error) {
+	// client-go doesn't actually use the kind when getting the dynamic client from the client pool;
+	// it only needs the group and version.
+	dynamicClient, err := f.clientPool.ClientForGroupVersionKind(gv.WithKind(""))
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting client for GroupVersionResource %s", gvr)
+		return nil, errors.Wrapf(err, "error getting client for GroupVersion %s, Resource %s", gv.String(), resource.String())
 	}

 	return &dynamicResourceClient{
@@ -61,27 +58,36 @@ func (f *dynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionRe
 	}, nil
 }

-func (f *dynamicFactory) ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error) {
-	dynamicClient, err := f.clientPool.ClientForGroupVersionKind(gvk)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error getting client for GroupVersionKind %s", gvk)
-	}
-
-	return &dynamicResourceClient{
-		resourceClient: dynamicClient.Resource(&resource, namespace),
-	}, nil
-}
+// Creator creates an object.
+type Creator interface {
+	// Create creates an object.
+	Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
+}
+
+// Lister lists objects.
+type Lister interface {
+	// List lists all the objects of a given resource.
+	List(metav1.ListOptions) (runtime.Object, error)
+}
+
+// Watcher watches objects.
+type Watcher interface {
+	// Watch watches for changes to objects of a given resource.
+	Watch(metav1.ListOptions) (watch.Interface, error)
+}
+
+// Getter gets an object.
+type Getter interface {
+	// Get fetches an object by name.
+	Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error)
+}

 // Dynamic contains client methods that Ark needs for backing up and restoring resources.
 type Dynamic interface {
-	// Create creates an object.
-	Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
-	// List lists all the objects of a given resource.
-	List(metav1.ListOptions) (runtime.Object, error)
-	// Watch watches for changes to objects of a given resource.
-	Watch(metav1.ListOptions) (watch.Interface, error)
-	// Get fetches an object by name.
-	Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error)
+	Creator
+	Lister
+	Watcher
+	Getter
 }

 // dynamicResourceClient implements Dynamic.
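Splitting Dynamic into Creator, Lister, Watcher, and Getter is standard Go interface composition: callers can accept only the capability they need, and fakes only have to implement that one method. A minimal, self-contained sketch of the pattern with toy types (not Ark's actual ones):

package main

import "fmt"

// Object is a toy stand-in for *unstructured.Unstructured.
type Object struct{ Name string }

// Small, single-purpose interfaces.
type Getter interface {
	Get(name string) (*Object, error)
}

type Lister interface {
	List() ([]*Object, error)
}

// Dynamic composes the narrow interfaces, so a full client satisfies all of them.
type Dynamic interface {
	Getter
	Lister
}

// printObject depends only on Getter, so a test can pass a tiny fake
// instead of a full Dynamic implementation.
func printObject(g Getter, name string) error {
	obj, err := g.Get(name)
	if err != nil {
		return err
	}
	fmt.Println(obj.Name)
	return nil
}

// fakeGetter is all a test of printObject needs to provide.
type fakeGetter struct{ obj *Object }

func (f fakeGetter) Get(name string) (*Object, error) { return f.obj, nil }

func main() {
	_ = printObject(fakeGetter{obj: &Object{Name: "cm-1"}}, "cm-1")
}

A consumer that only reads objects can take a Getter, so its tests stub one method instead of the whole dynamic client.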
@@ -39,7 +39,9 @@ import (
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
+	kcorev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/pkg/api/v1"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"

 	api "github.com/heptio/ark/pkg/apis/ark/v1"
@@ -134,6 +136,7 @@ func getSortedLogLevels() []string {
 }

 type server struct {
+	kubeClientConfig *rest.Config
 	kubeClient kubernetes.Interface
 	arkClient clientset.Interface
 	backupService cloudprovider.BackupService
@@ -165,6 +168,7 @@ func newServer(kubeconfig, baseName string, logger *logrus.Logger) (*server, err
 	ctx, cancelFunc := context.WithCancel(context.Background())

 	s := &server{
+		kubeClientConfig: clientConfig,
 		kubeClient: kubeClient,
 		arkClient: arkClient,
 		discoveryClient: arkClient.Discovery(),
@@ -502,7 +506,7 @@ func (s *server) runControllers(config *api.Config) error {
 	if config.RestoreOnlyMode {
 		s.logger.Info("Restore only mode - not starting the backup, schedule or GC controllers")
 	} else {
-		backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService)
+		backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService, s.kubeClientConfig, s.kubeClient.CoreV1())
 		cmd.CheckError(err)
 		backupController := controller.NewBackupController(
 			s.sharedInformerFactory.Ark().V1().Backups(),
@@ -610,23 +614,27 @@ func newBackupper(
 	clientPool dynamic.ClientPool,
 	backupService cloudprovider.BackupService,
 	snapshotService cloudprovider.SnapshotService,
+	kubeClientConfig *rest.Config,
+	kubeCoreV1Client kcorev1client.CoreV1Interface,
 ) (backup.Backupper, error) {
 	actions := map[string]backup.Action{}
+	dynamicFactory := client.NewDynamicFactory(clientPool)

 	if snapshotService != nil {
 		action, err := backup.NewVolumeSnapshotAction(snapshotService)
 		if err != nil {
 			return nil, err
 		}

 		actions["persistentvolumes"] = action

 		actions["persistentvolumeclaims"] = backup.NewBackupPVAction()
 	}

 	return backup.NewKubernetesBackupper(
 		discoveryHelper,
-		client.NewDynamicFactory(clientPool),
+		dynamicFactory,
 		actions,
+		backup.NewPodCommandExecutor(kubeClientConfig, kubeCoreV1Client.RESTClient()),
 	)
 }
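newBackupper now receives the *rest.Config and a CoreV1 client so it can hand the Kubernetes backupper a pod command executor for running exec hooks. The sketch below shows the general wiring shape only; the Executor interface and its ExecutePodCommand method are illustrative stand-ins, not Ark's actual PodCommandExecutor API:

package main

import "fmt"

// Executor is a stand-in for the pod exec hook runner the backupper needs.
type Executor interface {
	ExecutePodCommand(namespace, pod, container string, command []string) error
}

// logExecutor is a trivial Executor used only for this sketch.
type logExecutor struct{}

func (logExecutor) ExecutePodCommand(namespace, pod, container string, command []string) error {
	fmt.Printf("exec %v in %s/%s (container %s)\n", command, namespace, pod, container)
	return nil
}

// Backupper receives its collaborators from the caller instead of constructing
// them itself, which keeps it testable with fakes.
type Backupper struct {
	podCommandExecutor Executor
}

func NewBackupper(podCommandExecutor Executor) *Backupper {
	return &Backupper{podCommandExecutor: podCommandExecutor}
}

func main() {
	b := NewBackupper(logExecutor{})
	_ = b.podCommandExecutor.ExecutePodCommand("ns-1", "pod-1", "app", []string{"/bin/sh", "-c", "sync"})
}

Building the executor once at server startup keeps the backupper free of any direct dependency on the REST config.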
@@ -229,16 +229,6 @@ func (controller *backupController) processBackup(key string) error {
 	// set backup version
 	backup.Status.Version = backupVersion

-	// included resources defaulting
-	if len(backup.Spec.IncludedResources) == 0 {
-		backup.Spec.IncludedResources = []string{"*"}
-	}
-
-	// included namespace defaulting
-	if len(backup.Spec.IncludedNamespaces) == 0 {
-		backup.Spec.IncludedNamespaces = []string{"*"}
-	}
-
 	// calculate expiration
 	if backup.Spec.TTL.Duration > 0 {
 		backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))
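With the defaulting removed, processBackup no longer rewrites the user's spec; empty include lists keep their meaning of "everything" at the point of use (see the IncludesExcludes changes later in this diff). A small sketch of resolving effective includes without mutating the stored spec; the helper name is made up for illustration:

package main

import "fmt"

// BackupSpecLists is a stand-in for the include fields on a Backup spec.
type BackupSpecLists struct {
	IncludedNamespaces []string
	IncludedResources  []string
}

// effectiveIncludes interprets an empty list as "include everything"
// without writing the default back into the spec.
func effectiveIncludes(includes []string) []string {
	if len(includes) == 0 {
		return []string{"*"}
	}
	return includes
}

func main() {
	spec := BackupSpecLists{IncludedResources: []string{"deployments"}}
	fmt.Println(effectiveIncludes(spec.IncludedNamespaces)) // [*]
	fmt.Println(effectiveIncludes(spec.IncludedResources))  // [deployments]
	fmt.Println(spec.IncludedNamespaces)                    // [] - the spec stays as the user wrote it
}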
@@ -118,18 +118,16 @@ func TestProcessBackup(t *testing.T) {
 			expectBackup: true,
 		},
 		{
 			name: "if includednamespaces are specified, don't default to *",
 			key: "heptio-ark/backup1",
 			backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"),
-			expectedIncludes: []string{"*"},
 			expectBackup: true,
 		},
 		{
 			name: "ttl",
 			key: "heptio-ark/backup1",
 			backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute),
-			expectedIncludes: []string{"*"},
 			expectBackup: true,
 		},
 		{
 			name: "backup with SnapshotVolumes when allowSnapshots=false fails validation",
@@ -138,12 +136,11 @@ func TestProcessBackup(t *testing.T) {
 			expectBackup: false,
 		},
 		{
 			name: "backup with SnapshotVolumes when allowSnapshots=true gets executed",
 			key: "heptio-ark/backup1",
 			backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
 			allowSnapshots: true,
-			expectedIncludes: []string{"*"},
 			expectBackup: true,
 		},
 	}
@@ -170,8 +167,6 @@ func TestProcessBackup(t *testing.T) {

 		var expiration time.Time

-		var expectedNSes []string
-
 		if test.backup != nil {
 			// add directly to the informer's store so the lister can function and so we don't have to
 			// start the shared informers.
@@ -187,14 +182,7 @@ func TestProcessBackup(t *testing.T) {
 			backup := copy.(*v1.Backup)
 			backup.Spec.IncludedResources = test.expectedIncludes
 			backup.Spec.ExcludedResources = test.expectedExcludes
-			if test.backup.Spec.IncludedNamespaces == nil {
-				expectedNSes = []string{"*"}
-			} else {
-				expectedNSes = test.backup.Spec.IncludedNamespaces
-			}
-
-			backup.Spec.IncludedNamespaces = expectedNSes
+			backup.Spec.IncludedNamespaces = test.backup.Spec.IncludedNamespaces
 			backup.Spec.SnapshotVolumes = test.backup.Spec.SnapshotVolumes
 			backup.Status.Phase = v1.BackupPhaseInProgress
 			backup.Status.Expiration.Time = expiration
@@ -240,7 +228,7 @@ func TestProcessBackup(t *testing.T) {
 				WithPhase(v1.BackupPhaseInProgress).
 				WithIncludedResources(test.expectedIncludes...).
 				WithExcludedResources(test.expectedExcludes...).
-				WithIncludedNamespaces(expectedNSes...).
+				WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...).
 				WithTTL(test.backup.Spec.TTL.Duration).
 				WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
 				WithExpiration(expiration).
@@ -256,7 +244,7 @@ func TestProcessBackup(t *testing.T) {
 				WithPhase(v1.BackupPhaseCompleted).
 				WithIncludedResources(test.expectedIncludes...).
 				WithExcludedResources(test.expectedExcludes...).
-				WithIncludedNamespaces(expectedNSes...).
+				WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...).
 				WithTTL(test.backup.Spec.TTL.Duration).
 				WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
 				WithExpiration(expiration).
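The test table drops the expectedIncludes defaulting and the locally computed expectedNSes, so expectations are now built from the spec exactly as written. Reduced to a self-contained sketch of that table-driven shape (the types and helper here are illustrative, not Ark's):

package example

import (
	"reflect"
	"testing"
)

type spec struct{ IncludedNamespaces []string }

// processed stands in for the controller: it no longer rewrites the spec,
// so the input passes through unchanged.
func processed(s spec) spec {
	return s
}

func TestSpecPassesThroughUnchanged(t *testing.T) {
	tests := []struct {
		name string
		in   spec
	}{
		{name: "empty namespaces stay empty", in: spec{}},
		{name: "explicit namespaces are preserved", in: spec{IncludedNamespaces: []string{"ns-1"}}},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got := processed(test.in)
			if !reflect.DeepEqual(got, test.in) {
				t.Errorf("got %+v, want %+v", got, test.in)
			}
		})
	}
}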
@@ -231,14 +231,6 @@ func (controller *restoreController) processRestore(key string) error {
 		return err
 	}

-	// defaulting
-	if len(restore.Spec.IncludedNamespaces) == 0 {
-		restore.Spec.IncludedNamespaces = []string{"*"}
-	}
-	if len(restore.Spec.IncludedResources) == 0 {
-		restore.Spec.IncludedResources = []string{"*"}
-	}
-
 	excludedResources := sets.NewString(restore.Spec.ExcludedResources...)
 	for _, nonrestorable := range nonRestorableResources {
 		if !excludedResources.Has(nonrestorable) {
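The surrounding context shows the pattern that replaces the deleted defaulting: build a set from the user's excludes and force-add anything Ark cannot restore. The same idea with a plain map in place of sets.String; the resource names here are only examples:

package main

import "fmt"

func main() {
	nonRestorableResources := []string{"nodes", "events"} // illustrative values
	specExcludes := []string{"secrets"}

	// Build a set from the user-provided excludes.
	excluded := map[string]bool{}
	for _, r := range specExcludes {
		excluded[r] = true
	}

	// Force-exclude anything that can never be restored, without duplicating entries.
	for _, nonrestorable := range nonRestorableResources {
		if !excluded[nonrestorable] {
			excluded[nonrestorable] = true
			specExcludes = append(specExcludes, nonrestorable)
		}
	}

	fmt.Println(specExcludes) // [secrets nodes events]
}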
@@ -175,7 +175,7 @@ func TestProcessRestore(t *testing.T) {
 			restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Restore,
 			expectedErr: false,
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "", "ns-1", "*", api.RestorePhaseFailedValidation).
+				NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseFailedValidation).
 					WithValidationError("BackupName must be non-empty and correspond to the name of a backup in object storage.").
 					Restore,
 			},
@@ -187,8 +187,8 @@ func TestProcessRestore(t *testing.T) {
 			expectedErr: false,
 			backupServiceGetBackupError: errors.New("no backup here"),
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
 					WithErrors(api.RestoreResult{
 						Ark: []string{"no backup here"},
 					}).
@@ -202,8 +202,8 @@ func TestProcessRestore(t *testing.T) {
 			restorerError: errors.New("blarg"),
 			expectedErr: false,
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
 					WithErrors(api.RestoreResult{
 						Namespaces: map[string][]string{
 							"ns-1": {"blarg"},
@@ -211,7 +211,7 @@ func TestProcessRestore(t *testing.T) {
 					}).
 					Restore,
 			},
-			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
+			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
 		},
 		{
 			name: "valid restore gets executed",
@@ -219,21 +219,10 @@ func TestProcessRestore(t *testing.T) {
 			backup: NewTestBackup().WithName("backup-1").Backup,
 			expectedErr: false,
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).Restore,
 			},
-			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
-		},
-		{
-			name: "restore with no restorable namespaces gets defaulted to *",
-			restore: NewRestore("foo", "bar", "backup-1", "", "", api.RestorePhaseNew).Restore,
-			backup: NewTestBackup().WithName("backup-1").Backup,
-			expectedErr: false,
-			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseInProgress).Restore,
-				NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseCompleted).Restore,
-			},
-			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseInProgress).Restore,
+			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
 		},
 		{
 			name: "valid restore with RestorePVs=true gets executed when allowRestoreSnapshots=true",
@@ -242,10 +231,10 @@ func TestProcessRestore(t *testing.T) {
 			allowRestoreSnapshots: true,
 			expectedErr: false,
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).WithRestorePVs(true).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).WithRestorePVs(true).Restore,
 			},
-			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
+			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
 		},
 		{
 			name: "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false",
@@ -253,7 +242,7 @@ func TestProcessRestore(t *testing.T) {
 			backup: NewTestBackup().WithName("backup-1").Backup,
 			expectedErr: false,
 			expectedRestoreUpdates: []*api.Restore{
-				NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseFailedValidation).
+				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseFailedValidation).
 					WithRestorePVs(true).
 					WithValidationError("Server is not configured for PV snapshot restores").
 					Restore,
@@ -442,7 +442,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 		}

 		var err error
-		resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionKind(obj.GroupVersionKind(), resource, namespace)
+		resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace)
 		if err != nil {
 			addArkError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
 			return warnings, errs
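The restore path now asks the factory for a client by group/version plus APIResource, deriving the group/version from the object itself via obj.GroupVersionKind().GroupVersion(). As a rough illustration of what that derivation amounts to, an apiVersion string is just an optional group plus a version:

package main

import (
	"fmt"
	"strings"
)

// parseGroupVersion splits an apiVersion string ("v1", "apps/v1", ...) into
// its group and version parts, mirroring what GroupVersionKind().GroupVersion()
// carries for an unstructured object.
func parseGroupVersion(apiVersion string) (group, version string) {
	if i := strings.Index(apiVersion, "/"); i >= 0 {
		return apiVersion[:i], apiVersion[i+1:]
	}
	return "", apiVersion
}

func main() {
	for _, av := range []string{"v1", "apps/v1beta1", "ark.heptio.com/v1"} {
		g, v := parseGroupVersion(av)
		fmt.Printf("%-20s group=%q version=%q\n", av, g, v)
	}
}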
@@ -405,8 +405,8 @@ func TestRestoreResourceForNamespace(t *testing.T) {

 			dynamicFactory := &FakeDynamicFactory{}
 			resource := metav1.APIResource{Name: "configmaps", Namespaced: true}
-			gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}
-			dynamicFactory.On("ClientForGroupVersionKind", gvk, resource, test.namespace).Return(resourceClient, nil)
+			gv := schema.GroupVersion{Group: "", Version: "v1"}
+			dynamicFactory.On("ClientForGroupVersionResource", gv, resource, test.namespace).Return(resourceClient, nil)

 			log, _ := testlogger.NewNullLogger()
@@ -17,6 +17,8 @@ limitations under the License.
 package collections

 import (
+	"strings"
+
 	"github.com/pkg/errors"

 	"k8s.io/apimachinery/pkg/util/sets"
@@ -70,13 +72,33 @@ func (ie *IncludesExcludes) ShouldInclude(s string) bool {
 		return false
 	}

-	return ie.includes.Has("*") || ie.includes.Has(s)
+	// len=0 means include everything
+	return ie.includes.Len() == 0 || ie.includes.Has("*") || ie.includes.Has(s)
 }

-// IncludeEverything returns true if the Includes list is '*'
-// and the Excludes list is empty, or false otherwise.
+// IncludesString returns a string containing all of the includes, separated by commas, or * if the
+// list is empty.
+func (ie *IncludesExcludes) IncludesString() string {
+	return asString(ie.GetIncludes())
+}
+
+// ExcludesString returns a string containing all of the excludes, separated by commas, or * if the
+// list is empty.
+func (ie *IncludesExcludes) ExcludesString() string {
+	return asString(ie.GetExcludes())
+}
+
+func asString(in []string) string {
+	if len(in) == 0 {
+		return "*"
+	}
+	return strings.Join(in, ", ")
+}
+
+// IncludeEverything returns true if the includes list is empty or '*'
+// and the excludes list is empty, or false otherwise.
 func (ie *IncludesExcludes) IncludeEverything() bool {
-	return ie.excludes.Len() == 0 && ie.includes.Len() == 1 && ie.includes.Has("*")
+	return ie.excludes.Len() == 0 && (ie.includes.Len() == 0 || (ie.includes.Len() == 1 && ie.includes.Has("*")))
 }

 // ValidateIncludesExcludes checks provided lists of included and excluded
@@ -91,10 +113,6 @@ func ValidateIncludesExcludes(includesList, excludesList []string) []error {
 	includes := sets.NewString(includesList...)
 	excludes := sets.NewString(excludesList...)

-	if includes.Len() == 0 {
-		errs = append(errs, errors.New("includes list cannot be empty"))
-	}
-
 	if includes.Len() > 1 && includes.Has("*") {
 		errs = append(errs, errors.New("includes list must either contain '*' only, or a non-empty list of items"))
 	}
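The new include/exclude semantics: excludes win, and an empty includes list now behaves like "*". A condensed, self-contained restatement of that decision logic (not the actual IncludesExcludes implementation):

package main

import "fmt"

// shouldInclude mirrors the new behavior: an exclude match rejects the item,
// and an empty or "*" includes list means "include everything".
func shouldInclude(includes, excludes []string, item string) bool {
	for _, e := range excludes {
		if e == item {
			return false
		}
	}
	if len(includes) == 0 {
		return true
	}
	for _, i := range includes {
		if i == "*" || i == item {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldInclude(nil, nil, "pods"))                              // true: empty includes everything
	fmt.Println(shouldInclude([]string{"*"}, []string{"secrets"}, "secrets")) // false: excludes win
	fmt.Println(shouldInclude([]string{"pods"}, nil, "deployments"))          // false: not in the includes list
}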
@@ -34,9 +34,9 @@ func TestShouldInclude(t *testing.T) {
 		should bool
 	}{
 		{
-			name: "empty - don't include anything",
+			name: "empty - include everything",
 			check: "foo",
-			should: false,
+			should: true,
 		},
 		{
 			name: "include *",
@@ -97,9 +97,8 @@ func TestValidateIncludesExcludes(t *testing.T) {
 		expected []error
 	}{
 		{
-			name: "include nothing not allowed",
+			name: "empty includes (everything) is allowed",
 			includes: []string{},
-			expected: []error{errors.New("includes list cannot be empty")},
 		},
 		{
 			name: "include everything",
@@ -32,12 +32,10 @@ import (
 )

 // NamespaceAndName returns a string in the format <namespace>/<name>
-func NamespaceAndName(metaAccessor metav1.ObjectMetaAccessor) string {
-	objMeta := metaAccessor.GetObjectMeta()
-	if objMeta == nil {
-		return ""
+func NamespaceAndName(objMeta metav1.Object) string {
+	if objMeta.GetNamespace() == "" {
+		return objMeta.GetName()
 	}

 	return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
 }
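NamespaceAndName now takes a metav1.Object directly and renders cluster-scoped objects (empty namespace) as just their name. The same behavior with plain strings, for reference:

package main

import "fmt"

// namespaceAndName matches the new behavior: cluster-scoped objects
// (empty namespace) render as just their name.
func namespaceAndName(namespace, name string) string {
	if namespace == "" {
		return name
	}
	return fmt.Sprintf("%s/%s", namespace, name)
}

func main() {
	fmt.Println(namespaceAndName("", "my-cluster-role"))  // my-cluster-role
	fmt.Println(namespaceAndName("ns-1", "my-configmap")) // ns-1/my-configmap
}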
@@ -34,13 +34,8 @@ type FakeDynamicFactory struct {

 var _ client.DynamicFactory = &FakeDynamicFactory{}

-func (df *FakeDynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (client.Dynamic, error) {
-	args := df.Called(gvr, resource, namespace)
-	return args.Get(0).(client.Dynamic), args.Error(1)
-}
-
-func (df *FakeDynamicFactory) ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (client.Dynamic, error) {
-	args := df.Called(gvk, resource, namespace)
+func (df *FakeDynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (client.Dynamic, error) {
+	args := df.Called(gv, resource, namespace)
 	return args.Get(0).(client.Dynamic), args.Error(1)
 }
@@ -17,8 +17,7 @@ limitations under the License.
 package test

 import (
-	"errors"
+	"github.com/pkg/errors"

 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -38,12 +37,12 @@ func (m *FakeMapper) ResourceFor(input schema.GroupVersionResource) (schema.Grou
 		}, nil
 	}
 	if m.Resources == nil {
-		return schema.GroupVersionResource{}, errors.New("invalid resource")
+		return schema.GroupVersionResource{}, errors.Errorf("invalid resource %q", input.String())
 	}

 	if gr, found := m.Resources[input]; found {
 		return gr, nil
 	}

-	return schema.GroupVersionResource{}, errors.New("invalid resource")
+	return schema.GroupVersionResource{}, errors.Errorf("invalid resource %q", input.String())
 }
@@ -0,0 +1,29 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+	"io/ioutil"
+
+	"github.com/sirupsen/logrus"
+)
+
+func NewLogger() *logrus.Entry {
+	logger := logrus.New()
+	logger.Out = ioutil.Discard
+	return logrus.NewEntry(logger)
+}
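The new helper gives tests a logrus.Entry whose output goes to ioutil.Discard, keeping noisy code quiet under go test. An equivalent, self-contained sketch of how such a logger behaves:

package main

import (
	"io/ioutil"

	"github.com/sirupsen/logrus"
)

// newQuietLogger mirrors the new test helper: a logrus.Entry whose output is discarded.
func newQuietLogger() *logrus.Entry {
	logger := logrus.New()
	logger.Out = ioutil.Discard
	return logrus.NewEntry(logger)
}

func main() {
	log := newQuietLogger()
	log.WithField("backup", "backup-1").Info("this line is written to ioutil.Discard")
}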