parent f78dd073bf
commit b51d1a0202
@@ -255,7 +255,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
 }
 
 func (r *DataDownloadReconciler) runCancelableDataPath(ctx context.Context, fsRestore datapath.AsyncBR, dd *velerov2alpha1api.DataDownload, res *exposer.ExposeResult, log logrus.FieldLogger) (reconcile.Result, error) {
-	path, err := exposer.GetPodVolumeHostPath(ctx, res.ByPod.HostingPod, res.ByPod.PVC, r.client, r.fileSystem, log)
+	path, err := exposer.GetPodVolumeHostPath(ctx, res.ByPod.HostingPod, res.ByPod.VolumeName, r.client, r.fileSystem, log)
 	if err != nil {
 		return r.errorOut(ctx, dd, err, "error exposing host path for pod volume", log)
 	}
@@ -343,7 +343,7 @@ func TestDataDownloadReconcile(t *testing.T) {
 		} else if test.notNilExpose {
 			hostingPod := builder.ForPod("test-ns", "test-name").Volumes(&corev1.Volume{Name: "test-pvc"}).Result()
 			hostingPod.ObjectMeta.SetUID("test-uid")
-			ep.On("GetExposed", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: hostingPod, PVC: "test-pvc"}}, nil)
+			ep.On("GetExposed", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: hostingPod, VolumeName: "test-pvc"}}, nil)
 		} else if test.isGetExposeErr {
 			ep.On("GetExposed", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("Error to get restore exposer"))
 		}
@@ -250,7 +250,7 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 func (r *DataUploadReconciler) runCancelableDataUpload(ctx context.Context, fsBackup datapath.AsyncBR, du *velerov2alpha1api.DataUpload, res *exposer.ExposeResult, log logrus.FieldLogger) (reconcile.Result, error) {
 	log.Info("Run cancelable dataUpload")
-	path, err := exposer.GetPodVolumeHostPath(ctx, res.ByPod.HostingPod, res.ByPod.PVC, r.client, r.fileSystem, log)
+	path, err := exposer.GetPodVolumeHostPath(ctx, res.ByPod.HostingPod, res.ByPod.VolumeName, r.client, r.fileSystem, log)
 	if err != nil {
 		return r.errorOut(ctx, du, err, "error exposing host path for pod volume", log)
 	}
@@ -253,7 +253,7 @@ func (f *fakeSnapshotExposer) GetExposed(ctx context.Context, du corev1.ObjectRe
 	if err != nil {
 		return nil, err
 	}
-	return &exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: pod, PVC: dataUploadName}}, nil
+	return &exposer.ExposeResult{ByPod: exposer.ExposeByPod{HostingPod: pod, VolumeName: dataUploadName}}, nil
 }
 
 func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1.ObjectReference, string, string) {
@@ -218,7 +218,7 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
 
 	curLog.WithField("backup pvc", backupPVCName).Info("Backup PVC is bound")
 
-	return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, PVC: backupPVCName}}, nil
+	return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: pod.Spec.Volumes[0].Name}}, nil
 }
 
 func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference, vsName string, sourceNamespace string) {
@@ -345,6 +345,9 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject corev1.ObjectReference, backupPVC *corev1.PersistentVolumeClaim, label map[string]string) (*corev1.Pod, error) {
 	podName := ownerObject.Name
 
+	volumeName := string(ownerObject.UID)
+	containerName := string(ownerObject.UID)
+
 	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
@@ -370,20 +373,20 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:            podName,
+					Name:            containerName,
 					Image:           podInfo.image,
 					ImagePullPolicy: corev1.PullNever,
 					Command:         []string{"/velero-helper", "pause"},
 					VolumeMounts: []corev1.VolumeMount{{
-						Name:      backupPVC.Name,
-						MountPath: "/" + backupPVC.Name,
+						Name:      volumeName,
+						MountPath: "/" + volumeName,
 					}},
 				},
 			},
 			ServiceAccountName:            podInfo.serviceAccount,
 			TerminationGracePeriodSeconds: &gracePeriod,
 			Volumes: []corev1.Volume{{
-				Name: backupPVC.Name,
+				Name: volumeName,
 				VolumeSource: corev1.VolumeSource{
 					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
 						ClaimName: backupPVC.Name,
@@ -143,7 +143,7 @@ func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject core
 
 	curLog.WithField("restore pvc", restorePVCName).Info("Restore PVC is bound")
 
-	return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, PVC: restorePVCName}}, nil
+	return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: pod.Spec.Volumes[0].Name}}, nil
 }
 
 func (e *genericRestoreExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference) {
@@ -251,6 +251,9 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
 	restorePodName := ownerObject.Name
 	restorePVCName := ownerObject.Name
 
+	volumeName := string(ownerObject.UID)
+	containerName := string(ownerObject.UID)
+
 	podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get inherited pod info from node-agent")
@@ -276,20 +279,20 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:            restorePodName,
+					Name:            containerName,
 					Image:           podInfo.image,
 					ImagePullPolicy: corev1.PullNever,
 					Command:         []string{"/velero-helper", "pause"},
 					VolumeMounts: []corev1.VolumeMount{{
-						Name:      restorePVCName,
-						MountPath: "/" + restorePVCName,
+						Name:      volumeName,
+						MountPath: "/" + volumeName,
 					}},
 				},
 			},
 			ServiceAccountName:            podInfo.serviceAccount,
 			TerminationGracePeriodSeconds: &gracePeriod,
 			Volumes: []corev1.Volume{{
-				Name: restorePVCName,
+				Name: volumeName,
 				VolumeSource: corev1.VolumeSource{
 					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
 						ClaimName: restorePVCName,
@@ -34,23 +34,23 @@ var getVolumeDirectory = kube.GetVolumeDirectory
 var singlePathMatch = kube.SinglePathMatch
 
 // GetPodVolumeHostPath returns a path that can be accessed from the host for a given volume of a pod
-func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, pvcName string,
+func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, volumeName string,
 	cli ctrlclient.Client, fs filesystem.Interface, log logrus.FieldLogger) (datapath.AccessPoint, error) {
-	logger := log.WithField("pod name", pod.Name).WithField("pod UID", pod.GetUID()).WithField("pvc", pvcName)
+	logger := log.WithField("pod name", pod.Name).WithField("pod UID", pod.GetUID()).WithField("volume", volumeName)
 
-	volDir, err := getVolumeDirectory(ctx, logger, pod, pvcName, cli)
+	volDir, err := getVolumeDirectory(ctx, logger, pod, volumeName, cli)
 	if err != nil {
-		return datapath.AccessPoint{}, errors.Wrapf(err, "error getting volume directory name for pvc %s in pod %s", pvcName, pod.Name)
+		return datapath.AccessPoint{}, errors.Wrapf(err, "error getting volume directory name for volume %s in pod %s", volumeName, pod.Name)
 	}
 
-	logger.WithField("volDir", volDir).Info("Got volume for backup PVC")
+	logger.WithField("volDir", volDir).Info("Got volume dir")
 
 	pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pod.GetUID()), volDir)
 	logger.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob")
 
 	path, err := singlePathMatch(pathGlob, fs, logger)
 	if err != nil {
-		return datapath.AccessPoint{}, errors.Wrapf(err, "error identifying unique volume path on host for pvc %s in pod %s", pvcName, pod.Name)
+		return datapath.AccessPoint{}, errors.Wrapf(err, "error identifying unique volume path on host for volume %s in pod %s", volumeName, pod.Name)
 	}
 
 	logger.WithField("path", path).Info("Found path matching glob")
@@ -48,7 +48,7 @@ func TestGetPodVolumeHostPath(t *testing.T) {
 			},
 			pod: builder.ForPod(velerov1api.DefaultNamespace, "fake-pod-1").Result(),
 			pvc: "fake-pvc-1",
-			err: "error getting volume directory name for pvc fake-pvc-1 in pod fake-pod-1: fake-error-1",
+			err: "error getting volume directory name for volume fake-pvc-1 in pod fake-pod-1: fake-error-1",
 		},
 		{
 			name: "single path match fail",
@@ -60,7 +60,7 @@ func TestGetPodVolumeHostPath(t *testing.T) {
 			},
 			pod: builder.ForPod(velerov1api.DefaultNamespace, "fake-pod-2").Result(),
 			pvc: "fake-pvc-1",
-			err: "error identifying unique volume path on host for pvc fake-pvc-1 in pod fake-pod-2: fake-error-2",
+			err: "error identifying unique volume path on host for volume fake-pvc-1 in pod fake-pod-2: fake-error-2",
 		},
 	}
 
@@ -33,5 +33,5 @@ type ExposeResult struct {
 // ExposeByPod defines the result for the expose method that a hosting pod is created
 type ExposeByPod struct {
 	HostingPod *corev1.Pod
-	PVC        string
+	VolumeName string
 }
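
Below is a minimal, self-contained sketch of the host-path lookup pattern the changed code relies on: a caller now passes the hosting pod together with the pod volume name (ExposeByPod.VolumeName rather than a PVC name), and the path is resolved through a glob under /host_pods/<pod UID>/volumes/. The resolveVolumeDir helper and the sample values below are illustrative placeholders, not part of the Velero API.

package main

import (
	"fmt"
	"path/filepath"
)

// resolveVolumeDir stands in for kube.GetVolumeDirectory; here it simply
// passes the volume name through, which is a simplifying assumption for
// the sketch, not what the real helper does.
func resolveVolumeDir(volumeName string) string {
	return volumeName
}

func main() {
	// Illustrative placeholders for pod.GetUID() and ExposeByPod.VolumeName.
	podUID := "0f9f2c11-aaaa-bbbb-cccc-000000000000"
	volumeName := "0f9f2c11-aaaa-bbbb-cccc-000000000000"

	volDir := resolveVolumeDir(volumeName)

	// Same glob shape used by GetPodVolumeHostPath in the diff above.
	pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", podUID, volDir)

	matches, err := filepath.Glob(pathGlob)
	if err != nil || len(matches) != 1 {
		fmt.Printf("expected exactly one match for %s, got %d (err=%v)\n", pathGlob, len(matches), err)
		return
	}
	fmt.Println("host path for volume:", matches[0])
}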