Fix #6752: add namespace exclude check.

Add PSA audit and warn labels.

Signed-off-by: Xun Jiang <jxun@vmware.com>
pull/6760/head
Xun Jiang 2023-09-05 10:50:20 +08:00
parent 8d3a67544d
commit 79b810ed25
4 changed files with 25 additions and 30 deletions

@@ -0,0 +1 @@
+Fix #6752: add namespace exclude check.

@@ -196,9 +196,8 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
 	log.Info("Getting items for resource")
 	var (
 		gvr = gv.WithResource(resource.Name)
 		gr  = gvr.GroupResource()
-		clusterScoped = !resource.Namespaced
 	)

 	orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
@@ -272,8 +271,6 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
 		}
 	}

-	namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)
-
 	// Handle namespace resource here.
 	// Namespace are only filtered by namespace include/exclude filters.
 	// Label selectors are not checked.
@@ -289,11 +286,14 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
 			return nil, errors.WithStack(err)
 		}

-		items := r.backupNamespaces(unstructuredList, namespacesToList, gr, preferredGVR, log)
+		items := r.backupNamespaces(unstructuredList, r.backupRequest.NamespaceIncludesExcludes, gr, preferredGVR, log)

 		return items, nil
 	}

+	clusterScoped := !resource.Namespaced
+	namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)
+
 	// If we get here, we're backing up something other than namespaces
 	if clusterScoped {
 		namespacesToList = []string{""}
@@ -533,31 +533,13 @@ func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstr
 // backupNamespaces process namespace resource according to namespace filters.
 func (r *itemCollector) backupNamespaces(unstructuredList *unstructured.UnstructuredList,
-	namespacesToList []string, gr schema.GroupResource, preferredGVR schema.GroupVersionResource,
+	ie *collections.IncludesExcludes, gr schema.GroupResource, preferredGVR schema.GroupVersionResource,
 	log logrus.FieldLogger) []*kubernetesResource {
 	var items []*kubernetesResource

 	for index, unstructured := range unstructuredList.Items {
-		found := false
-		if len(namespacesToList) == 0 {
-			// No namespace found. By far, this condition cannot be triggered. Either way,
-			// namespacesToList is not empty.
-			log.Debug("Skip namespace resource, because no item found by namespace filters.")
-			break
-		} else if len(namespacesToList) == 1 && namespacesToList[0] == "" {
-			// All namespaces are included.
-			log.Debugf("Backup namespace %s due to full cluster backup.", unstructured.GetName())
-			found = true
-		} else {
-			for _, ns := range namespacesToList {
-				if unstructured.GetName() == ns {
-					log.Debugf("Backup namespace %s due to namespace filters setting.", unstructured.GetName())
-					found = true
-					break
-				}
-			}
-		}
-
-		if found {
+		if ie.ShouldInclude(unstructured.GetName()) {
+			log.Debugf("Backup namespace %s due to namespace filters setting.", unstructured.GetName())
+
			path, err := r.writeToFile(&unstructuredList.Items[index])
 			if err != nil {
 				log.WithError(err).Error("Error writing item to file")
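The removed loop compared each namespace name only against the resolved namespacesToList, so a backup that included all namespaces could still write the Namespace object of an explicitly excluded namespace; passing the backup's IncludesExcludes and calling ShouldInclude applies the exclude list as well. Below is a standalone sketch of the include/exclude semantics this relies on; the types and helper names are illustrative, not Velero's actual collections package.

// Standalone sketch: a name is kept when it is not excluded and either
// everything ("*") or that specific name is included.
package main

import "fmt"

type includesExcludes struct {
	includes map[string]struct{}
	excludes map[string]struct{}
}

func (ie includesExcludes) shouldInclude(name string) bool {
	if _, excluded := ie.excludes[name]; excluded {
		return false // exclusions always win
	}
	if _, all := ie.includes["*"]; all {
		return true
	}
	_, included := ie.includes[name]
	return included
}

func main() {
	// Roughly: a backup that includes "*" but excludes "kube-system".
	ie := includesExcludes{
		includes: map[string]struct{}{"*": {}},
		excludes: map[string]struct{}{"kube-system": {}},
	}
	for _, ns := range []string{"default", "kube-system", "velero"} {
		fmt.Printf("%-12s include=%v\n", ns, ie.shouldInclude(ns))
	}
}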

@@ -32,7 +32,11 @@ import (
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 )

-const defaultServiceAccountName = "velero"
+const (
+	defaultServiceAccountName = "velero"
+	podSecurityLevel          = "privileged"
+	podSecurityVersion        = "latest"
+)

 var (
 	DefaultVeleroPodCPURequest = "500m"
@@ -148,8 +152,12 @@ func Namespace(namespace string) *corev1.Namespace {
 		},
 	}

-	ns.Labels["pod-security.kubernetes.io/enforce"] = "privileged"
-	ns.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
+	ns.Labels["pod-security.kubernetes.io/enforce"] = podSecurityLevel
+	ns.Labels["pod-security.kubernetes.io/enforce-version"] = podSecurityVersion
+	ns.Labels["pod-security.kubernetes.io/audit"] = podSecurityLevel
+	ns.Labels["pod-security.kubernetes.io/audit-version"] = podSecurityVersion
+	ns.Labels["pod-security.kubernetes.io/warn"] = podSecurityLevel
+	ns.Labels["pod-security.kubernetes.io/warn-version"] = podSecurityVersion

 	return ns
 }
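With only the enforce pair set, clusters that evaluate the audit and warn Pod Security Admission modes against a stricter default could still emit warnings or audit annotations for Velero's privileged workloads; the patch labels all three modes, plus their version pins, from the new constants. A minimal sketch of the resulting object using standard client-go types follows; privilegedNamespace is an illustrative name, not Velero's install helper.

// Sketch: a namespace carrying all six Pod Security Admission labels at the
// privileged level, mirroring what the patched Namespace() helper now produces.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	podSecurityLevel   = "privileged"
	podSecurityVersion = "latest"
)

// privilegedNamespace is illustrative, not part of Velero.
func privilegedNamespace(name string) *corev1.Namespace {
	return &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"pod-security.kubernetes.io/enforce":         podSecurityLevel,
				"pod-security.kubernetes.io/enforce-version": podSecurityVersion,
				"pod-security.kubernetes.io/audit":           podSecurityLevel,
				"pod-security.kubernetes.io/audit-version":   podSecurityVersion,
				"pod-security.kubernetes.io/warn":            podSecurityLevel,
				"pod-security.kubernetes.io/warn-version":    podSecurityVersion,
			},
		},
	}
}

func main() {
	for k, v := range privilegedNamespace("velero").Labels {
		fmt.Printf("%s=%s\n", k, v)
	}
}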

@@ -47,6 +47,10 @@ func TestResources(t *testing.T) {
 	// PSA(Pod Security Admission) and PSS(Pod Security Standards).
 	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce"], "privileged")
 	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/enforce-version"], "latest")
+	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit"], "privileged")
+	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/audit-version"], "latest")
+	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn"], "privileged")
+	assert.Equal(t, ns.Labels["pod-security.kubernetes.io/warn-version"], "latest")

 	crb := ClusterRoleBinding(DefaultVeleroNamespace)
 	// The CRB is a cluster-scoped resource
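Assuming these install helpers live under Velero's pkg/install package (file paths are not shown in this view), the new assertions can be exercised with: go test -run TestResources ./pkg/install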