Restore finalizer and managedFields (#5853)
Restore finalizer and managedFields of metadata during the restoration
Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
pull/5894/head
parent
fa58a775e8
commit
4d0c3ac83f
|
@ -0,0 +1 @@
|
|||
Restore finalizer and managedFields of metadata during the restoration
|
|
@ -146,3 +146,10 @@ func WithGenerateName(val string) func(obj metav1.Object) {
|
|||
obj.SetGenerateName(val)
|
||||
}
|
||||
}
|
||||
|
||||
// WithManagedFields is a functional option that applies the specified managed fields to an object.
|
||||
func WithManagedFields(val []metav1.ManagedFieldsEntry) func(obj metav1.Object) {
|
||||
return func(obj metav1.Object) {
|
||||
obj.SetManagedFields(val)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1395,6 +1395,24 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
|||
}
|
||||
}
|
||||
|
||||
// restore the managedFields
|
||||
withoutManagedFields := createdObj.DeepCopy()
|
||||
createdObj.SetManagedFields(obj.GetManagedFields())
|
||||
patchBytes, err := generatePatch(withoutManagedFields, createdObj)
|
||||
if err != nil {
|
||||
ctx.log.Errorf("error generating patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs
|
||||
}
|
||||
if patchBytes != nil {
|
||||
if _, err = resourceClient.Patch(name, patchBytes); err != nil {
|
||||
ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs
|
||||
}
|
||||
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
|
||||
}
|
||||
|
||||
if groupResource == kuberesource.Pods {
|
||||
pod := new(v1.Pod)
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
|
||||
|
@ -1694,8 +1712,8 @@ func resetMetadata(obj *unstructured.Unstructured) (*unstructured.Unstructured,
|
|||
|
||||
for k := range metadata {
|
||||
switch k {
|
||||
case "name", "namespace", "labels", "annotations":
|
||||
default:
|
||||
case "generateName", "selfLink", "uid", "resourceVersion", "generation", "creationTimestamp", "deletionTimestamp",
|
||||
"deletionGracePeriodSeconds", "ownerReferences":
|
||||
delete(metadata, k)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -864,7 +864,7 @@ func TestRestoreItems(t *testing.T) {
|
|||
want []*test.APIResource
|
||||
}{
|
||||
{
|
||||
name: "metadata other than namespace/name/labels/annotations gets removed",
|
||||
name: "metadata uid/resourceVersion/etc. gets removed",
|
||||
restore: defaultRestore().Result(),
|
||||
backup: defaultBackup().Result(),
|
||||
tarball: test.NewTarWriter(t).
|
||||
|
@ -874,6 +874,7 @@ func TestRestoreItems(t *testing.T) {
|
|||
builder.WithLabels("key-1", "val-1"),
|
||||
builder.WithAnnotations("key-1", "val-1"),
|
||||
builder.WithFinalizers("finalizer-1"),
|
||||
builder.WithUID("uid"),
|
||||
).
|
||||
Result(),
|
||||
).
|
||||
|
@ -887,6 +888,7 @@ func TestRestoreItems(t *testing.T) {
|
|||
ObjectMeta(
|
||||
builder.WithLabels("key-1", "val-1", "velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
|
||||
builder.WithAnnotations("key-1", "val-1"),
|
||||
builder.WithFinalizers("finalizer-1"),
|
||||
).
|
||||
Result(),
|
||||
),
|
||||
|
@ -1104,6 +1106,53 @@ func TestRestoreItems(t *testing.T) {
|
|||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata managedFields gets restored",
|
||||
restore: defaultRestore().Result(),
|
||||
backup: defaultBackup().Result(),
|
||||
tarball: test.NewTarWriter(t).
|
||||
AddItems("pods",
|
||||
builder.ForPod("ns-1", "pod-1").
|
||||
ObjectMeta(
|
||||
builder.WithManagedFields([]metav1.ManagedFieldsEntry{
|
||||
{
|
||||
Manager: "kubectl",
|
||||
Operation: "Apply",
|
||||
APIVersion: "v1",
|
||||
FieldsType: "FieldsV1",
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:data": {"f:key":{}}}`),
|
||||
},
|
||||
},
|
||||
}),
|
||||
).
|
||||
Result(),
|
||||
).
|
||||
Done(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.Pods(),
|
||||
},
|
||||
want: []*test.APIResource{
|
||||
test.Pods(
|
||||
builder.ForPod("ns-1", "pod-1").
|
||||
ObjectMeta(
|
||||
builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
|
||||
builder.WithManagedFields([]metav1.ManagedFieldsEntry{
|
||||
{
|
||||
Manager: "kubectl",
|
||||
Operation: "Apply",
|
||||
APIVersion: "v1",
|
||||
FieldsType: "FieldsV1",
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:data": {"f:key":{}}}`),
|
||||
},
|
||||
},
|
||||
}),
|
||||
).
|
||||
Result(),
|
||||
),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
|
@ -2846,10 +2895,16 @@ func TestResetMetadata(t *testing.T) {
|
|||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "keep name, namespace, labels, annotations only",
|
||||
obj: NewTestUnstructured().WithMetadata("name", "blah", "namespace", "labels", "annotations", "foo").Unstructured,
|
||||
name: "keep name, namespace, labels, annotations, managedFields, finalizers",
|
||||
obj: NewTestUnstructured().WithMetadata("name", "namespace", "labels", "annotations", "managedFields", "finalizers").Unstructured,
|
||||
expectedErr: false,
|
||||
expectedRes: NewTestUnstructured().WithMetadata("name", "namespace", "labels", "annotations").Unstructured,
|
||||
expectedRes: NewTestUnstructured().WithMetadata("name", "namespace", "labels", "annotations", "managedFields", "finalizers").Unstructured,
|
||||
},
|
||||
{
|
||||
name: "remove uid, ownerReferences",
|
||||
obj: NewTestUnstructured().WithMetadata("name", "namespace", "uid", "ownerReferences").Unstructured,
|
||||
expectedErr: false,
|
||||
expectedRes: NewTestUnstructured().WithMetadata("name", "namespace").Unstructured,
|
||||
},
|
||||
{
|
||||
name: "keep status",
|
||||
|
|
|
@ -18,6 +18,7 @@ package restore
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
@ -83,10 +84,10 @@ func deleteNodePorts(service *corev1api.Service) error {
|
|||
// find any NodePorts whose values were explicitly specified according
|
||||
// to the last-applied-config annotation. We'll retain these values, and
|
||||
// clear out any other (presumably auto-assigned) NodePort values.
|
||||
explicitNodePorts := sets.NewString()
|
||||
unnamedPortInts := sets.NewInt()
|
||||
lastAppliedConfig, ok := service.Annotations[annotationLastAppliedConfig]
|
||||
if ok {
|
||||
explicitNodePorts := sets.NewString()
|
||||
unnamedPortInts := sets.NewInt()
|
||||
appliedServiceUnstructured := new(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(lastAppliedConfig), appliedServiceUnstructured); err != nil {
|
||||
return errors.WithStack(err)
|
||||
|
@ -134,19 +135,58 @@ func deleteNodePorts(service *corev1api.Service) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, port := range service.Spec.Ports {
|
||||
if port.Name != "" {
|
||||
if !explicitNodePorts.Has(port.Name) {
|
||||
service.Spec.Ports[i].NodePort = 0
|
||||
}
|
||||
} else {
|
||||
if !unnamedPortInts.Has(int(port.NodePort)) {
|
||||
service.Spec.Ports[i].NodePort = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for i, port := range service.Spec.Ports {
|
||||
if port.Name != "" {
|
||||
if !explicitNodePorts.Has(port.Name) {
|
||||
service.Spec.Ports[i].NodePort = 0
|
||||
explicitNodePorts := sets.NewString()
|
||||
for _, entry := range service.GetManagedFields() {
|
||||
if entry.FieldsV1 == nil {
|
||||
continue
|
||||
}
|
||||
fields := new(map[string]interface{})
|
||||
if err := json.Unmarshal(entry.FieldsV1.Raw, fields); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
ports, exist, err := unstructured.NestedMap(*fields, "f:spec", "f:ports")
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
for key, port := range ports {
|
||||
p, ok := port.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if !unnamedPortInts.Has(int(port.NodePort)) {
|
||||
service.Spec.Ports[i].NodePort = 0
|
||||
if _, exist := p["f:nodePort"]; exist {
|
||||
explicitNodePorts.Insert(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, port := range service.Spec.Ports {
|
||||
k := portKey(port)
|
||||
if !explicitNodePorts.Has(k) {
|
||||
service.Spec.Ports[i].NodePort = 0
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func portKey(port corev1api.ServicePort) string {
|
||||
return fmt.Sprintf(`k:{"port":%d,"protocol":"%s"}`, port.Port, port.Protocol)
|
||||
}
|
||||
|
|
|
@ -368,6 +368,124 @@ func TestServiceActionExecute(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nodePort should be delete when not specified in managedFields",
|
||||
obj: corev1api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "svc-1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:spec":{"f:ports":{"k:{\"port\":443,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{}},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{}}},"f:selector":{},"f:type":{}}}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: corev1api.ServiceSpec{
|
||||
Ports: []corev1api.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 443,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
restore: builder.ForRestore(api.DefaultNamespace, "").Result(),
|
||||
expectedRes: corev1api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "svc-1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:spec":{"f:ports":{"k:{\"port\":443,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{}},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{}}},"f:selector":{},"f:type":{}}}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: corev1api.ServiceSpec{
|
||||
Ports: []corev1api.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
NodePort: 0,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 443,
|
||||
NodePort: 0,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nodePort should be preserved when specified in managedFields",
|
||||
obj: corev1api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "svc-1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:spec":{"f:ports":{"k:{\"port\":443,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:nodePort":{},"f:port":{}},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:nodePort":{},"f:port":{}}},"f:selector":{},"f:type":{}}}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: corev1api.ServiceSpec{
|
||||
Ports: []corev1api.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
NodePort: 30000,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 443,
|
||||
NodePort: 30002,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
restore: builder.ForRestore(api.DefaultNamespace, "").Result(),
|
||||
expectedRes: corev1api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "svc-1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsV1: &metav1.FieldsV1{
|
||||
Raw: []byte(`{"f:spec":{"f:ports":{"k:{\"port\":443,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:nodePort":{},"f:port":{}},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:nodePort":{},"f:port":{}}},"f:selector":{},"f:type":{}}}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: corev1api.ServiceSpec{
|
||||
Ports: []corev1api.ServicePort{
|
||||
{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
NodePort: 30000,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
Port: 443,
|
||||
NodePort: 30002,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
|
Loading…
Reference in New Issue