/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	corev1api "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	corev1informers "k8s.io/client-go/informers/core/v1"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	k8scache "sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/internal/credentials"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
	informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
	listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/restic"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
)
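
// podVolumeRestoreController restores pod volume data from restic backups. It runs in the
// restic daemonset pod on each node and only enqueues and processes PodVolumeRestores whose
// pods are scheduled to that node.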
type podVolumeRestoreController struct {
	*genericController

	podVolumeRestoreClient velerov1client.PodVolumeRestoresGetter
	podVolumeRestoreLister listers.PodVolumeRestoreLister
	podLister              corev1listers.PodLister
	pvcLister              corev1listers.PersistentVolumeClaimLister
	pvLister               corev1listers.PersistentVolumeLister
	backupLocationInformer k8scache.Informer
	kbClient               client.Client
	nodeName               string
	credentialsFileStore   credentials.FileStore

	processRestoreFunc func(*velerov1api.PodVolumeRestore) error
	fileSystem         filesystem.Interface
	clock              clock.Clock
}

// NewPodVolumeRestoreController creates a new pod volume restore controller.
func NewPodVolumeRestoreController(
	logger logrus.FieldLogger,
	podVolumeRestoreInformer informers.PodVolumeRestoreInformer,
	podVolumeRestoreClient velerov1client.PodVolumeRestoresGetter,
	podInformer cache.SharedIndexInformer,
	pvcInformer corev1informers.PersistentVolumeClaimInformer,
	pvInformer corev1informers.PersistentVolumeInformer,
	kbClient client.Client,
	nodeName string,
	credentialsFileStore credentials.FileStore,
) Interface {
	c := &podVolumeRestoreController{
		genericController:      newGenericController(PodVolumeRestore, logger),
		podVolumeRestoreClient: podVolumeRestoreClient,
		podVolumeRestoreLister: podVolumeRestoreInformer.Lister(),
		podLister:              corev1listers.NewPodLister(podInformer.GetIndexer()),
		pvcLister:              pvcInformer.Lister(),
		pvLister:               pvInformer.Lister(),
		kbClient:               kbClient,
		nodeName:               nodeName,
		credentialsFileStore:   credentialsFileStore,
		fileSystem:             filesystem.NewFileSystem(),
		clock:                  &clock.RealClock{},
	}

	c.syncHandler = c.processQueueItem
	c.cacheSyncWaiters = append(
		c.cacheSyncWaiters,
		podVolumeRestoreInformer.Informer().HasSynced,
		podInformer.HasSynced,
		pvcInformer.Informer().HasSynced,
	)
	c.processRestoreFunc = c.processRestore

	podVolumeRestoreInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: c.pvrHandler,
			UpdateFunc: func(_, obj interface{}) {
				c.pvrHandler(obj)
			},
		},
	)

	podInformer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: c.podHandler,
			UpdateFunc: func(_, obj interface{}) {
				c.podHandler(obj)
			},
		},
	)

	return c
}
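
// pvrHandler enqueues a PodVolumeRestore for processing if it is new, its pod exists
// on this node, and the pod's restic-wait init container is running.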
func (c *podVolumeRestoreController) pvrHandler(obj interface{}) {
	pvr := obj.(*velerov1api.PodVolumeRestore)
	log := loggerForPodVolumeRestore(c.logger, pvr)

	if !isPVRNew(pvr) {
		log.Debug("Restore is not new, not enqueuing")
		return
	}

	pod, err := c.podLister.Pods(pvr.Spec.Pod.Namespace).Get(pvr.Spec.Pod.Name)
	if apierrors.IsNotFound(err) {
		log.WithError(err).Debugf("Restore's pod %s/%s not found, not enqueueing.", pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name)
		return
	}
	if err != nil {
		log.WithError(err).Errorf("Unable to get restore's pod %s/%s, not enqueueing.", pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name)
		return
	}

	if !isPodOnNode(pod, c.nodeName) {
		log.Debug("Restore's pod is not on this node, not enqueuing")
		return
	}

	if !isResticInitContainerRunning(pod) {
		log.Debug("Restore's pod is not running restic-wait init container, not enqueuing")
		return
	}

	resticInitContainerIndex := getResticInitContainerIndex(pod)
	if resticInitContainerIndex > 0 {
		log.Warnf(`Init containers before the %s container may cause issues
		          if they interfere with volumes being restored: %s index %d`, restic.InitContainer, restic.InitContainer, resticInitContainerIndex)
	}

	log.Debug("Enqueueing")
	c.enqueue(obj)
}
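
// podHandler enqueues any new PodVolumeRestores associated with a pod once that pod's
// restic-wait init container is running on this node.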
func (c *podVolumeRestoreController) podHandler(obj interface{}) {
	pod := obj.(*corev1api.Pod)
	log := c.logger.WithField("key", kube.NamespaceAndName(pod))

	// the pod should always be for this node since the podInformer is filtered
	// based on node, so this is just a failsafe.
	if !isPodOnNode(pod, c.nodeName) {
		return
	}

	if !isResticInitContainerRunning(pod) {
		log.Debug("Pod is not running restic-wait init container, not enqueuing restores for pod")
		return
	}

	resticInitContainerIndex := getResticInitContainerIndex(pod)
	if resticInitContainerIndex > 0 {
		log.Warnf(`Init containers before the %s container may cause issues
		          if they interfere with volumes being restored: %s index %d`, restic.InitContainer, restic.InitContainer, resticInitContainerIndex)
	}

	selector := labels.Set(map[string]string{
		velerov1api.PodUIDLabel: string(pod.UID),
	}).AsSelector()

	pvrs, err := c.podVolumeRestoreLister.List(selector)
	if err != nil {
		log.WithError(err).Error("Unable to list pod volume restores")
		return
	}

	if len(pvrs) == 0 {
		return
	}

	for _, pvr := range pvrs {
		log := loggerForPodVolumeRestore(log, pvr)
		if !isPVRNew(pvr) {
			log.Debug("Restore is not new, not enqueuing")
			continue
		}
		log.Debug("Enqueuing")
		c.enqueue(pvr)
	}
}
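
// isPVRNew returns true if the PodVolumeRestore has no phase set or is in the New phase.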
func isPVRNew(pvr *velerov1api.PodVolumeRestore) bool {
	return pvr.Status.Phase == "" || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseNew
}
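
// isPodOnNode returns true if the pod is scheduled to the given node.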
func isPodOnNode(pod *corev1api.Pod, node string) bool {
	return pod.Spec.NodeName == node
}
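
// isResticInitContainerRunning returns true if the pod has a restic-wait init container
// and that container is currently running.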
func isResticInitContainerRunning(pod *corev1api.Pod) bool {
	// Restic wait container can be anywhere in the list of init containers, but must be running.
	i := getResticInitContainerIndex(pod)
	return i >= 0 &&
		len(pod.Status.InitContainerStatuses)-1 >= i &&
		pod.Status.InitContainerStatuses[i].State.Running != nil
}
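
// getResticInitContainerIndex returns the index of the restic-wait init container in the
// pod's spec, or -1 if the pod does not have one.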
func getResticInitContainerIndex(pod *corev1api.Pod) int {
	// Restic wait container can be anywhere in the list of init containers so locate it.
	for i, initContainer := range pod.Spec.InitContainers {
		if initContainer.Name == restic.InitContainer {
			return i
		}
	}

	return -1
}
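
// processQueueItem looks up the PodVolumeRestore for a queue key and passes a deep copy
// of it to the restore-processing function.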
func (c *podVolumeRestoreController) processQueueItem(key string) error {
	log := c.logger.WithField("key", key)
	log.Debug("Running processQueueItem")

	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		log.WithError(errors.WithStack(err)).Error("error splitting queue key")
		return nil
	}

	req, err := c.podVolumeRestoreLister.PodVolumeRestores(ns).Get(name)
	if apierrors.IsNotFound(err) {
		log.Debug("Unable to find PodVolumeRestore")
		return nil
	}
	if err != nil {
		return errors.Wrap(err, "error getting PodVolumeRestore")
	}

	// Don't mutate the shared cache
	reqCopy := req.DeepCopy()
	return c.processRestoreFunc(reqCopy)
}
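
// loggerForPodVolumeRestore returns a logger with the PodVolumeRestore's namespace and name
// attached as fields, plus the owning restore when there is exactly one owner reference.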
func loggerForPodVolumeRestore(baseLogger logrus.FieldLogger, req *velerov1api.PodVolumeRestore) logrus.FieldLogger {
	log := baseLogger.WithFields(logrus.Fields{
		"namespace": req.Namespace,
		"name":      req.Name,
	})

	if len(req.OwnerReferences) == 1 {
		log = log.WithField("restore", fmt.Sprintf("%s/%s", req.Namespace, req.OwnerReferences[0].Name))
	}

	return log
}
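
// processRestore marks the PodVolumeRestore as InProgress, runs the restic restore for the
// pod's volume, and then marks the PodVolumeRestore as Completed, or Failed on error.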
func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRestore) error {
	log := loggerForPodVolumeRestore(c.logger, req)

	log.Info("Restore starting")

	var err error

	// update status to InProgress
	req, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) {
		r.Status.Phase = velerov1api.PodVolumeRestorePhaseInProgress
		r.Status.StartTimestamp = &metav1.Time{Time: c.clock.Now()}
	})
	if err != nil {
		log.WithError(err).Error("Error setting PodVolumeRestore startTimestamp and phase to InProgress")
		return errors.WithStack(err)
	}

	pod, err := c.podLister.Pods(req.Spec.Pod.Namespace).Get(req.Spec.Pod.Name)
	if err != nil {
		log.WithError(err).Errorf("Error getting pod %s/%s", req.Spec.Pod.Namespace, req.Spec.Pod.Name)
		return c.failRestore(req, errors.Wrap(err, "error getting pod").Error(), log)
	}

	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister)
	if err != nil {
		log.WithError(err).Error("Error getting volume directory name")
		return c.failRestore(req, errors.Wrap(err, "error getting volume directory name").Error(), log)
	}

	// execute the restore process
	if err := c.restorePodVolume(req, volumeDir, log); err != nil {
		log.WithError(err).Error("Error restoring volume")
		return c.failRestore(req, errors.Wrap(err, "error restoring volume").Error(), log)
	}

	// update status to Completed
	if _, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) {
		r.Status.Phase = velerov1api.PodVolumeRestorePhaseCompleted
		r.Status.CompletionTimestamp = &metav1.Time{Time: c.clock.Now()}
	}); err != nil {
		log.WithError(err).Error("Error setting PodVolumeRestore completionTimestamp and phase to Completed")
		return err
	}

	log.Info("Restore completed")

	return nil
}
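
// restorePodVolume locates the volume's path on the host, runs the restic restore into it,
// and writes a done file into the volume's .velero directory so the pod's restic-wait init
// container can detect that this restore has finished.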
func (c *podVolumeRestoreController) restorePodVolume(req *velerov1api.PodVolumeRestore, volumeDir string, log logrus.FieldLogger) error {
	// Get the full path of the new volume's directory as mounted in the daemonset pod, which
	// will look like: /host_pods/<new-pod-uid>/volumes/<volume-plugin-name>/<volume-dir>
	volumePath, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir))
	if err != nil {
		return errors.Wrap(err, "error identifying path of volume")
	}

	credsFile, err := c.credentialsFileStore.Path(restic.RepoKeySelector())
	if err != nil {
		log.WithError(err).Error("Error creating temp restic credentials file")
		return c.failRestore(req, errors.Wrap(err, "error creating temp restic credentials file").Error(), log)
	}
	// ignore error since there's nothing we can do and it's a temp file.
	defer os.Remove(credsFile)

	resticCmd := restic.RestoreCommand(
		req.Spec.RepoIdentifier,
		credsFile,
		req.Spec.SnapshotID,
		volumePath,
	)

	backupLocation := &velerov1api.BackupStorageLocation{}
	if err := c.kbClient.Get(context.Background(), client.ObjectKey{
		Namespace: req.Namespace,
		Name:      req.Spec.BackupStorageLocation,
	}, backupLocation); err != nil {
		return c.failRestore(req, errors.Wrap(err, "error getting backup storage location").Error(), log)
	}

	// if there's a caCert on the ObjectStorage, write it to disk so that it can be passed to restic
	var caCertFile string
	if backupLocation.Spec.ObjectStorage != nil && backupLocation.Spec.ObjectStorage.CACert != nil {
		caCertFile, err = restic.TempCACertFile(backupLocation.Spec.ObjectStorage.CACert, req.Spec.BackupStorageLocation, c.fileSystem)
		if err != nil {
			log.WithError(err).Error("Error creating temp cacert file")
		}
		// ignore error since there's nothing we can do and it's a temp file.
		defer os.Remove(caCertFile)
	}
	resticCmd.CACertFile = caCertFile

	env, err := restic.CmdEnv(backupLocation, c.credentialsFileStore)
	if err != nil {
		return c.failRestore(req, errors.Wrap(err, "error setting restic cmd env").Error(), log)
	}
	resticCmd.Env = env

	var stdout, stderr string

	if stdout, stderr, err = restic.RunRestore(resticCmd, log, c.updateRestoreProgressFunc(req, log)); err != nil {
		return errors.Wrapf(err, "error running restic restore, cmd=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)
	}
	log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)

	// Remove the .velero directory from the restored volume (it may contain done files from previous restores
	// of this volume, which we don't want to carry over). If this fails for any reason, log and continue, since
	// this is non-essential cleanup (the done files are named based on restore UID and the init container looks
	// for the one specific to the restore being executed).
	if err := os.RemoveAll(filepath.Join(volumePath, ".velero")); err != nil {
		log.WithError(err).Warnf("error removing .velero directory from directory %s", volumePath)
	}

	var restoreUID types.UID
	for _, owner := range req.OwnerReferences {
		if boolptr.IsSetToTrue(owner.Controller) {
			restoreUID = owner.UID
			break
		}
	}

	// Create the .velero directory within the volume dir so we can write a done file
	// for this restore.
	if err := os.MkdirAll(filepath.Join(volumePath, ".velero"), 0755); err != nil {
		return errors.Wrap(err, "error creating .velero directory for done file")
	}

	// Write a done file with name=<restore-uid> into the just-created .velero dir
	// within the volume. The velero restic init container on the pod is waiting
	// for this file to exist in each restored volume before completing.
	if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil {
		return errors.Wrap(err, "error writing done file")
	}

	return nil
}
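
// patchPodVolumeRestore applies the given mutation to the PodVolumeRestore in memory, computes
// a JSON merge patch between the original and mutated versions, and submits it to the API
// server, returning the patched object.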
func (c *podVolumeRestoreController) patchPodVolumeRestore(req *velerov1api.PodVolumeRestore, mutate func(*velerov1api.PodVolumeRestore)) (*velerov1api.PodVolumeRestore, error) {
	// Record original json
	oldData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling original PodVolumeRestore")
	}

	// Mutate
	mutate(req)

	// Record new json
	newData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling updated PodVolumeRestore")
	}

	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
	if err != nil {
		return nil, errors.Wrap(err, "error creating json merge patch for PodVolumeRestore")
	}

	req, err = c.podVolumeRestoreClient.PodVolumeRestores(req.Namespace).Patch(context.TODO(), req.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		return nil, errors.Wrap(err, "error patching PodVolumeRestore")
	}

	return req, nil
}
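
// failRestore patches the PodVolumeRestore to the Failed phase, recording the failure
// message and completion timestamp.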
func (c *podVolumeRestoreController) failRestore(req *velerov1api.PodVolumeRestore, msg string, log logrus.FieldLogger) error {
	if _, err := c.patchPodVolumeRestore(req, func(pvr *velerov1api.PodVolumeRestore) {
		pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
		pvr.Status.Message = msg
		pvr.Status.CompletionTimestamp = &metav1.Time{Time: c.clock.Now()}
	}); err != nil {
		log.WithError(err).Error("Error setting PodVolumeRestore phase to Failed")
		return err
	}
	return nil
}

// updateRestoreProgressFunc returns a func that takes progress info and patches
// the PodVolumeRestore with the new progress.
func (c *podVolumeRestoreController) updateRestoreProgressFunc(req *velerov1api.PodVolumeRestore, log logrus.FieldLogger) func(velerov1api.PodVolumeOperationProgress) {
	return func(progress velerov1api.PodVolumeOperationProgress) {
		if _, err := c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) {
			r.Status.Progress = progress
		}); err != nil {
			log.WithError(err).Error("error updating PodVolumeRestore progress")
		}
	}
}