new exposer for data mover ms

Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
pull/7988/head
Lyndon-Li 2024-06-06 18:27:51 +08:00
parent a8d77eae95
commit 49097744ee
17 changed files with 904 additions and 45 deletions

@ -0,0 +1 @@
New data path for data mover ms according to design #7574

@ -0,0 +1,92 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package datamover
import (
"fmt"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vmware-tanzu/velero/pkg/buildinfo"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
type dataMoverBackupConfig struct {
volumePath string
volumeMode string
duName string
resourceTimeout time.Duration
}
func NewBackupCommand(f client.Factory) *cobra.Command {
config := dataMoverBackupConfig{}
logLevelFlag := logging.LogLevelFlag(logrus.InfoLevel)
formatFlag := logging.NewFormatFlag()
command := &cobra.Command{
Use: "backup",
Short: "Run the velero data-mover backup",
Long: "Run the velero data-mover backup",
Hidden: true,
Run: func(c *cobra.Command, args []string) {
logLevel := logLevelFlag.Parse()
logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := logging.DefaultLogger(logLevel, formatFlag.Parse())
logger.Infof("Starting Velero data-mover backup %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())
f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
s := newdataMoverBackup(logger, config)
s.run()
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be backed up")
command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be backed up")
command.Flags().StringVar(&config.duName, "data-upload", config.duName, "The data upload name")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
_ = command.MarkFlagRequired("volume-mode")
_ = command.MarkFlagRequired("data-upload")
_ = command.MarkFlagRequired("resource-timeout")
return command
}
type dataMoverBackup struct {
logger logrus.FieldLogger
config dataMoverBackupConfig
}
func newdataMoverBackup(logger logrus.FieldLogger, config dataMoverBackupConfig) *dataMoverBackup {
s := &dataMoverBackup{
logger: logger,
config: config,
}
return s
}
func (s *dataMoverBackup) run() {
time.Sleep(time.Duration(1<<63 - 1))
}
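
Note (editor sketch, not part of this commit): a minimal illustration of how a hosting pod would be expected to invoke the new sub-command, using hypothetical flag values. The flag names mirror the four required flags registered in NewBackupCommand above.

```go
package main

import (
	"fmt"
	"time"
)

// backupArgs assembles the argument list for the new `velero data-mover backup`
// sub-command, mirroring the required flags registered above. Helper name and
// values are illustrative only.
func backupArgs(volumePath, volumeMode, duName string, resourceTimeout time.Duration) []string {
	return []string{
		"data-mover", "backup",
		fmt.Sprintf("--volume-path=%s", volumePath),
		fmt.Sprintf("--volume-mode=%s", volumeMode),
		fmt.Sprintf("--data-upload=%s", duName),
		fmt.Sprintf("--resource-timeout=%s", resourceTimeout.String()),
	}
}

func main() {
	// Hypothetical values; in practice the exposer fills these in at pod-creation time.
	fmt.Println(backupArgs("/fake-uid", "Filesystem", "fake-data-upload", 10*time.Minute))
}
```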

@ -0,0 +1,67 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package datamover
import (
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vmware-tanzu/velero/pkg/client"
)
func NewCommand(f client.Factory) *cobra.Command {
command := &cobra.Command{
Use: "data-mover",
Short: "Run the velero data-mover",
Long: "Run the velero data-mover",
Hidden: true,
}
command.AddCommand(
NewBackupCommand(f),
NewRestoreCommand(f),
)
return command
}
var funcExit = os.Exit
var funcCreateFile = os.Create
func exitWithMessage(logger logrus.FieldLogger, succeed bool, message string, a ...any) {
exitCode := 0
if !succeed {
exitCode = 1
}
toWrite := fmt.Sprintf(message, a...)
podFile, err := funcCreateFile("/dev/termination-log")
if err != nil {
logger.WithError(err).Error("Failed to create termination log file")
exitCode = 1
} else {
if _, err := podFile.WriteString(toWrite); err != nil {
logger.WithError(err).Error("Failed to write error to termination log file")
exitCode = 1
}
podFile.Close()
}
funcExit(exitCode)
}
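
Note (editor sketch, not part of this commit): exitWithMessage writes its final message to /dev/termination-log, which Kubernetes surfaces in the terminated container's status. A controller could read the message back roughly as below; the helper name and values are hypothetical.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// terminationMessage returns the message Kubernetes recorded for the named
// container once it terminated (i.e. what exitWithMessage wrote to
// /dev/termination-log).
func terminationMessage(pod *corev1.Pod, containerName string) (string, bool) {
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.Name != containerName {
			continue
		}
		if cs.State.Terminated != nil {
			return cs.State.Terminated.Message, true
		}
	}
	return "", false
}

func main() {
	// Hypothetical pod status for demonstration.
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			ContainerStatuses: []corev1.ContainerStatus{{
				Name:  "fake-container",
				State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Message: "fake-message"}},
			}},
		},
	}
	fmt.Println(terminationMessage(pod, "fake-container"))
}
```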

@ -0,0 +1,131 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package datamover
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
type exitWithMessageMock struct {
createErr error
writeFail bool
filePath string
exitCode int
}
func (em *exitWithMessageMock) Exit(code int) {
em.exitCode = code
}
func (em *exitWithMessageMock) CreateFile(name string) (*os.File, error) {
if em.createErr != nil {
return nil, em.createErr
}
if em.writeFail {
return os.OpenFile(em.filePath, os.O_CREATE|os.O_RDONLY, 0500)
} else {
return os.Create(em.filePath)
}
}
func TestExitWithMessage(t *testing.T) {
tests := []struct {
name string
message string
succeed bool
args []interface{}
createErr error
writeFail bool
expectedExitCode int
expectedMessage string
}{
{
name: "create pod file failed",
createErr: errors.New("fake-create-file-error"),
succeed: true,
expectedExitCode: 1,
},
{
name: "write pod file failed",
writeFail: true,
succeed: true,
expectedExitCode: 1,
},
{
name: "not succeed",
message: "fake-message-1, arg-1 %s, arg-2 %v, arg-3 %v",
args: []interface{}{
"arg-1-1",
10,
false,
},
expectedExitCode: 1,
expectedMessage: fmt.Sprintf("fake-message-1, arg-1 %s, arg-2 %v, arg-3 %v", "arg-1-1", 10, false),
},
{
name: "not succeed",
message: "fake-message-2, arg-1 %s, arg-2 %v, arg-3 %v",
args: []interface{}{
"arg-1-2",
20,
true,
},
succeed: true,
expectedMessage: fmt.Sprintf("fake-message-2, arg-1 %s, arg-2 %v, arg-3 %v", "arg-1-2", 20, true),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
podFile := filepath.Join(os.TempDir(), uuid.NewString())
em := exitWithMessageMock{
createErr: test.createErr,
writeFail: test.writeFail,
filePath: podFile,
}
funcExit = em.Exit
funcCreateFile = em.CreateFile
exitWithMessage(velerotest.NewLogger(), test.succeed, test.message, test.args...)
assert.Equal(t, test.expectedExitCode, em.exitCode)
if test.createErr == nil && !test.writeFail {
reader, err := os.Open(podFile)
require.NoError(t, err)
message, err := io.ReadAll(reader)
require.NoError(t, err)
reader.Close()
assert.Equal(t, test.expectedMessage, string(message))
}
})
}
}

@ -0,0 +1,92 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package datamover
import (
"fmt"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vmware-tanzu/velero/pkg/buildinfo"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
type dataMoverRestoreConfig struct {
volumePath string
volumeMode string
ddName string
resourceTimeout time.Duration
}
func NewRestoreCommand(f client.Factory) *cobra.Command {
logLevelFlag := logging.LogLevelFlag(logrus.InfoLevel)
formatFlag := logging.NewFormatFlag()
config := dataMoverRestoreConfig{}
command := &cobra.Command{
Use: "restore",
Short: "Run the velero data-mover restore",
Long: "Run the velero data-mover restore",
Hidden: true,
Run: func(c *cobra.Command, args []string) {
logLevel := logLevelFlag.Parse()
logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := logging.DefaultLogger(logLevel, formatFlag.Parse())
logger.Infof("Starting Velero data-mover restore %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())
f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
s := newdataMoverRestore(logger, config)
s.run()
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("The format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&config.volumePath, "volume-path", config.volumePath, "The full path of the volume to be restored")
command.Flags().StringVar(&config.volumeMode, "volume-mode", config.volumeMode, "The mode of the volume to be restored")
command.Flags().StringVar(&config.ddName, "data-download", config.ddName, "The data download name")
command.Flags().DurationVar(&config.resourceTimeout, "resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters.")
_ = command.MarkFlagRequired("volume-path")
_ = command.MarkFlagRequired("volume-mode")
_ = command.MarkFlagRequired("data-download")
_ = command.MarkFlagRequired("resource-timeout")
return command
}
type dataMoverRestore struct {
logger logrus.FieldLogger
config dataMoverRestoreConfig
}
func newdataMoverRestore(logger logrus.FieldLogger, config dataMoverRestoreConfig) *dataMoverRestore {
s := &dataMoverRestore{
logger: logger,
config: config,
}
return s
}
func (s *dataMoverRestore) run() {
time.Sleep(time.Duration(1<<63 - 1))
}

@ -51,6 +51,7 @@ import (
veleroflag "github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
"github.com/vmware-tanzu/velero/pkg/features"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/datamover"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/nodeagent"
)
@ -124,6 +125,7 @@ operations can also be performed as 'velero backup get' and 'velero schedule cre
snapshotlocation.NewCommand(f),
debug.NewCommand(f),
repomantenance.NewCommand(f),
datamover.NewCommand(f),
)
// init and add the klog flags

@ -162,7 +162,17 @@ func TestDataDownloadReconcile(t *testing.T) {
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
Spec: appsv1.DaemonSetSpec{},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "fake-image",
},
},
},
},
},
}
tests := []struct {

@ -170,7 +170,17 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
Spec: appsv1.DaemonSetSpec{},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "fake-image",
},
},
},
},
},
}
dataPathMgr := datapath.NewManager(1)

@ -18,6 +18,7 @@ package exposer
import (
"context"
"fmt"
"time"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
@ -174,7 +175,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
}
}()
backupPod, err := e.createBackupPod(ctx, ownerObject, backupPVC, csiExposeParam.HostingPodLabels, csiExposeParam.Affinity)
backupPod, err := e.createBackupPod(ctx, ownerObject, backupPVC, csiExposeParam.OperationTimeout, csiExposeParam.HostingPodLabels, csiExposeParam.Affinity)
if err != nil {
return errors.Wrap(err, "error to create backup pod")
}
@ -195,6 +196,8 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
backupPodName := ownerObject.Name
backupPVCName := ownerObject.Name
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
curLog := e.log.WithFields(logrus.Fields{
@ -237,7 +240,11 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
curLog.WithField("pod", pod.Name).Infof("Backup volume is found in pod at index %v", i)
return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: volumeName}}, nil
return &ExposeResult{ByPod: ExposeByPod{
HostingPod: pod,
HostingContainer: containerName,
VolumeName: volumeName,
}}, nil
}
func (e *csiSnapshotExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
@ -391,12 +398,12 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
return created, err
}
func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject corev1.ObjectReference, backupPVC *corev1.PersistentVolumeClaim,
func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject corev1.ObjectReference, backupPVC *corev1.PersistentVolumeClaim, operationTimeout time.Duration,
label map[string]string, affinity *nodeagent.LoadAffinity) (*corev1.Pod, error) {
podName := ownerObject.Name
volumeName := string(ownerObject.UID)
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
if err != nil {
@ -404,14 +411,41 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
}
var gracePeriod int64 = 0
volumeMounts, volumeDevices := kube.MakePodPVCAttachment(volumeName, backupPVC.Spec.VolumeMode)
volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, backupPVC.Spec.VolumeMode)
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1.Volume{{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: backupPVC.Name,
},
},
}}
volumes = append(volumes, podInfo.volumes...)
if label == nil {
label = make(map[string]string)
}
label[podGroupLabel] = podGroupSnapshot
volumeMode := corev1.PersistentVolumeFilesystem
if backupPVC.Spec.VolumeMode != nil {
volumeMode = *backupPVC.Spec.VolumeMode
}
args := []string{
fmt.Sprintf("--volume-path=%s", volumePath),
fmt.Sprintf("--volume-mode=%s", volumeMode),
fmt.Sprintf("--data-upload=%s", ownerObject.Name),
fmt.Sprintf("--resource-timeout=%s", operationTimeout.String()),
}
args = append(args, podInfo.logFormatArgs...)
args = append(args, podInfo.logLevelArgs...)
userID := int64(0)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@ -446,21 +480,24 @@ func (e *csiSnapshotExposer) createBackupPod(ctx context.Context, ownerObject co
Name: containerName,
Image: podInfo.image,
ImagePullPolicy: corev1.PullNever,
Command: []string{"/velero-helper", "pause"},
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Command: []string{
"/velero",
"data-mover",
"backup",
},
Args: args,
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Env: podInfo.env,
},
},
ServiceAccountName: podInfo.serviceAccount,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []corev1.Volume{{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: backupPVC.Name,
},
},
}},
Volumes: volumes,
RestartPolicy: corev1.RestartPolicyNever,
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: &userID,
},
},
}
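
Note (editor sketch, not part of this commit): the backup pod's single container now runs the data mover itself instead of `/velero-helper pause`. The shape of the resulting container spec, with hypothetical values standing in for the owner DataUpload's name and UID:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// exampleBackupContainer sketches the container created by createBackupPod
// after this change. All values are placeholders; the real image, env, and
// mounts are inherited from the node-agent pod template.
func exampleBackupContainer() corev1.Container {
	return corev1.Container{
		Name:            "fake-uid", // string(ownerObject.UID)
		Image:           "velero/velero:latest",
		ImagePullPolicy: corev1.PullNever,
		Command:         []string{"/velero", "data-mover", "backup"},
		Args: []string{
			"--volume-path=/fake-uid",
			"--volume-mode=Filesystem",
			"--data-upload=fake-data-upload",
			"--resource-timeout=10m0s",
		},
	}
}

func main() {
	fmt.Printf("%+v\n", exampleBackupContainer())
}
```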

@ -138,7 +138,17 @@ func TestExpose(t *testing.T) {
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
Spec: appsv1.DaemonSetSpec{},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "node-agent",
},
},
},
},
},
}
tests := []struct {

@ -87,7 +87,7 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.O
return errors.Errorf("Target PVC %s/%s has already been bound, abort", sourceNamespace, targetPVCName)
}
restorePod, err := e.createRestorePod(ctx, ownerObject, targetPVC, hostingPodLabels, selectedNode)
restorePod, err := e.createRestorePod(ctx, ownerObject, targetPVC, timeout, hostingPodLabels, selectedNode)
if err != nil {
return errors.Wrapf(err, "error to create restore pod")
}
@ -119,6 +119,8 @@ func (e *genericRestoreExposer) Expose(ctx context.Context, ownerObject corev1.O
func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject corev1.ObjectReference, nodeClient client.Client, nodeName string, timeout time.Duration) (*ExposeResult, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
curLog := e.log.WithFields(logrus.Fields{
@ -162,7 +164,11 @@ func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject core
curLog.WithField("pod", pod.Name).Infof("Restore volume is found in pod at index %v", i)
return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: volumeName}}, nil
return &ExposeResult{ByPod: ExposeByPod{
HostingPod: pod,
HostingContainer: containerName,
VolumeName: volumeName,
}}, nil
}
func (e *genericRestoreExposer) PeekExposed(ctx context.Context, ownerObject corev1.ObjectReference) error {
@ -291,12 +297,12 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co
}
func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim,
label map[string]string, selectedNode string) (*corev1.Pod, error) {
operationTimeout time.Duration, label map[string]string, selectedNode string) (*corev1.Pod, error) {
restorePodName := ownerObject.Name
restorePVCName := ownerObject.Name
volumeName := string(ownerObject.UID)
containerName := string(ownerObject.UID)
volumeName := string(ownerObject.UID)
podInfo, err := getInheritedPodInfo(ctx, e.kubeClient, ownerObject.Namespace)
if err != nil {
@ -304,7 +310,35 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
}
var gracePeriod int64 = 0
volumeMounts, volumeDevices := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode)
volumeMounts, volumeDevices, volumePath := kube.MakePodPVCAttachment(volumeName, targetPVC.Spec.VolumeMode)
volumeMounts = append(volumeMounts, podInfo.volumeMounts...)
volumes := []corev1.Volume{{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: restorePVCName,
},
},
}}
volumes = append(volumes, podInfo.volumes...)
volumeMode := corev1.PersistentVolumeFilesystem
if targetPVC.Spec.VolumeMode != nil {
volumeMode = *targetPVC.Spec.VolumeMode
}
args := []string{
fmt.Sprintf("--volume-path=%s", volumePath),
fmt.Sprintf("--volume-mode=%s", volumeMode),
fmt.Sprintf("--data-download=%s", ownerObject.Name),
fmt.Sprintf("--resource-timeout=%s", operationTimeout.String()),
}
args = append(args, podInfo.logFormatArgs...)
args = append(args, podInfo.logLevelArgs...)
userID := int64(0)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -327,22 +361,25 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
Name: containerName,
Image: podInfo.image,
ImagePullPolicy: corev1.PullNever,
Command: []string{"/velero-helper", "pause"},
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Command: []string{
"/velero",
"data-mover",
"restore",
},
Args: args,
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Env: podInfo.env,
},
},
ServiceAccountName: podInfo.serviceAccount,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []corev1.Volume{{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: restorePVCName,
},
},
}},
NodeName: selectedNode,
Volumes: volumes,
NodeName: selectedNode,
RestartPolicy: corev1.RestartPolicyNever,
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: &userID,
},
},
}

@ -31,6 +31,7 @@ import (
velerotest "github.com/vmware-tanzu/velero/pkg/test"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
clientTesting "k8s.io/client-go/testing"
)
@ -74,7 +75,17 @@ func TestRestoreExpose(t *testing.T) {
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
Spec: appsv1.DaemonSetSpec{},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "fake-image",
},
},
},
},
},
}
tests := []struct {

@ -18,8 +18,10 @@ package exposer
import (
"context"
"strings"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
@ -28,6 +30,11 @@ import (
type inheritedPodInfo struct {
image string
serviceAccount string
env []v1.EnvVar
volumeMounts []v1.VolumeMount
volumes []v1.Volume
logLevelArgs []string
logFormatArgs []string
}
func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veleroNamespace string) (inheritedPodInfo, error) {
@ -39,11 +46,28 @@ func getInheritedPodInfo(ctx context.Context, client kubernetes.Interface, veler
}
if len(podSpec.Containers) != 1 {
return podInfo, errors.Wrap(err, "unexpected pod template from node-agent")
return podInfo, errors.New("unexpected pod template from node-agent")
}
podInfo.image = podSpec.Containers[0].Image
podInfo.serviceAccount = podSpec.ServiceAccountName
podInfo.env = podSpec.Containers[0].Env
podInfo.volumeMounts = podSpec.Containers[0].VolumeMounts
podInfo.volumes = podSpec.Volumes
args := podSpec.Containers[0].Args
for i, arg := range args {
if arg == "--log-format" {
podInfo.logFormatArgs = append(podInfo.logFormatArgs, args[i:i+2]...)
} else if strings.HasPrefix(arg, "--log-format") {
podInfo.logFormatArgs = append(podInfo.logFormatArgs, arg)
} else if arg == "--log-level" {
podInfo.logLevelArgs = append(podInfo.logLevelArgs, args[i:i+2]...)
} else if strings.HasPrefix(arg, "--log-level") {
podInfo.logLevelArgs = append(podInfo.logLevelArgs, arg)
}
}
return podInfo, nil
}
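
Note (editor sketch, not part of this commit): the flag-inheritance loop above handles both the `--flag=value` and `--flag value` spellings. A self-contained restatement of that logic (helper name is made up), with a quick check of both forms:

```go
package main

import (
	"fmt"
	"strings"
)

// inheritLogArgs mirrors the loop in getInheritedPodInfo: it carries over the
// node-agent's log flags whether they are passed as "--flag value" or
// "--flag=value".
func inheritLogArgs(args []string) (logFormat, logLevel []string) {
	for i, arg := range args {
		switch {
		case arg == "--log-format":
			logFormat = append(logFormat, args[i:i+2]...)
		case strings.HasPrefix(arg, "--log-format"):
			logFormat = append(logFormat, arg)
		case arg == "--log-level":
			logLevel = append(logLevel, args[i:i+2]...)
		case strings.HasPrefix(arg, "--log-level"):
			logLevel = append(logLevel, arg)
		}
	}
	return
}

func main() {
	format, level := inheritLogArgs([]string{"server", "--log-format=json", "--log-level", "debug"})
	fmt.Println(format, level) // [--log-format=json] [--log-level debug]
}
```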

pkg/exposer/image_test.go (new file, 271 lines)

@ -0,0 +1,271 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exposer
import (
"context"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
)
func TestGetInheritedPodInfo(t *testing.T) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
}
daemonSetWithNoLog := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container-1",
Image: "image-1",
Env: []v1.EnvVar{
{
Name: "env-1",
Value: "value-1",
},
{
Name: "env-2",
Value: "value-2",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
ServiceAccountName: "sa-1",
},
},
},
}
daemonSetWithLog := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "fake-ns",
Name: "node-agent",
},
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: appsv1.DaemonSetSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container-1",
Image: "image-1",
Env: []v1.EnvVar{
{
Name: "env-1",
Value: "value-1",
},
{
Name: "env-2",
Value: "value-2",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
Args: []string{
"--log-format=json",
"--log-level",
"debug",
},
Command: []string{
"command-1",
},
},
},
Volumes: []v1.Volume{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
ServiceAccountName: "sa-1",
},
},
},
}
scheme := runtime.NewScheme()
appsv1.AddToScheme(scheme)
tests := []struct {
name string
namespace string
client kubernetes.Interface
kubeClientObj []runtime.Object
result inheritedPodInfo
expectErr string
}{
{
name: "ds is not found",
namespace: "fake-ns",
expectErr: "error to get node-agent pod template: error to get node-agent daemonset: daemonsets.apps \"node-agent\" not found",
},
{
name:      "ds pod container number is invalid",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSet,
},
expectErr: "unexpected pod template from node-agent",
},
{
name: "no log info",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSetWithNoLog,
},
result: inheritedPodInfo{
image: "image-1",
serviceAccount: "sa-1",
env: []v1.EnvVar{
{
Name: "env-1",
Value: "value-1",
},
{
Name: "env-2",
Value: "value-2",
},
},
volumeMounts: []v1.VolumeMount{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
volumes: []v1.Volume{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
},
},
{
name: "with log info",
namespace: "fake-ns",
kubeClientObj: []runtime.Object{
daemonSetWithLog,
},
result: inheritedPodInfo{
image: "image-1",
serviceAccount: "sa-1",
env: []v1.EnvVar{
{
Name: "env-1",
Value: "value-1",
},
{
Name: "env-2",
Value: "value-2",
},
},
volumeMounts: []v1.VolumeMount{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
volumes: []v1.Volume{
{
Name: "volume-1",
},
{
Name: "volume-2",
},
},
logFormatArgs: []string{
"--log-format=json",
},
logLevelArgs: []string{
"--log-level",
"debug",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...)
info, err := getInheritedPodInfo(context.Background(), fakeKubeClient, test.namespace)
if test.expectErr == "" {
assert.NoError(t, err)
assert.True(t, reflect.DeepEqual(info, test.result))
} else {
assert.EqualError(t, err, test.expectErr)
}
})
}
}

@ -35,6 +35,7 @@ type ExposeResult struct {
// ExposeByPod defines the result for the expose method that a hosting pod is created
type ExposeByPod struct {
HostingPod *corev1.Pod
VolumeName string
HostingPod *corev1.Pod
HostingContainer string
VolumeName string
}
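
Note (editor sketch, not part of this commit): the expose result now also names the hosting container, so callers can address the exact container that runs the data mover rather than assuming a single-container pod. A hypothetical consumer, with a local copy of the struct so the sketch compiles on its own:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ExposeByPod mirrors the updated struct above; copied locally so this
// example is self-contained.
type ExposeByPod struct {
	HostingPod       *corev1.Pod
	HostingContainer string
	VolumeName       string
}

// describeExposed is a hypothetical consumer: with HostingContainer available,
// a caller can target the data-mover container directly (for example for exec
// or log streaming).
func describeExposed(r ExposeByPod) string {
	return fmt.Sprintf("pod=%s container=%s volume=%s", r.HostingPod.Name, r.HostingContainer, r.VolumeName)
}

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "fake-pod"}}
	fmt.Println(describeExposed(ExposeByPod{HostingPod: pod, HostingContainer: "fake-uid", VolumeName: "fake-uid"}))
}
```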

@ -340,23 +340,24 @@ func IsPVCBound(pvc *corev1api.PersistentVolumeClaim) bool {
}
// MakePodPVCAttachment returns the volume mounts and devices for a pod needed to attach a PVC
func MakePodPVCAttachment(volumeName string, volumeMode *corev1api.PersistentVolumeMode) ([]corev1api.VolumeMount, []corev1api.VolumeDevice) {
func MakePodPVCAttachment(volumeName string, volumeMode *corev1api.PersistentVolumeMode) ([]corev1api.VolumeMount, []corev1api.VolumeDevice, string) {
var volumeMounts []corev1api.VolumeMount = nil
var volumeDevices []corev1api.VolumeDevice = nil
volumePath := "/" + volumeName
if volumeMode != nil && *volumeMode == corev1api.PersistentVolumeBlock {
volumeDevices = []corev1api.VolumeDevice{{
Name: volumeName,
DevicePath: "/" + volumeName,
DevicePath: volumePath,
}}
} else {
volumeMounts = []corev1api.VolumeMount{{
Name: volumeName,
MountPath: "/" + volumeName,
MountPath: volumePath,
}}
}
return volumeMounts, volumeDevices
return volumeMounts, volumeDevices, volumePath
}
func GetPVForPVC(
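
Note (editor sketch, not part of this commit): a usage sketch for the extended MakePodPVCAttachment signature, with the function body reproduced locally so the example runs standalone. Filesystem mode yields a mount, block mode a device, and both report the in-pod path that the exposers now pass to the data mover via --volume-path.

```go
package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
)

// makePodPVCAttachment reproduces the updated helper: it returns the volume
// mounts or devices for the pod plus the in-pod path of the volume.
func makePodPVCAttachment(volumeName string, volumeMode *corev1api.PersistentVolumeMode) ([]corev1api.VolumeMount, []corev1api.VolumeDevice, string) {
	volumePath := "/" + volumeName
	if volumeMode != nil && *volumeMode == corev1api.PersistentVolumeBlock {
		return nil, []corev1api.VolumeDevice{{Name: volumeName, DevicePath: volumePath}}, volumePath
	}
	return []corev1api.VolumeMount{{Name: volumeName, MountPath: volumePath}}, nil, volumePath
}

func main() {
	block := corev1api.PersistentVolumeBlock
	_, devices, path := makePodPVCAttachment("fake-uid", &block)
	fmt.Println(devices[0].DevicePath, path) // /fake-uid /fake-uid
}
```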

@ -1378,3 +1378,65 @@ func TestGetPVCForPodVolume(t *testing.T) {
})
}
}
func TestMakePodPVCAttachment(t *testing.T) {
testCases := []struct {
name string
volumeName string
volumeMode corev1api.PersistentVolumeMode
expectedVolumeMount []corev1api.VolumeMount
expectedVolumeDevice []corev1api.VolumeDevice
expectedVolumePath string
}{
{
name: "no volume mode specified",
volumeName: "volume-1",
expectedVolumeMount: []corev1api.VolumeMount{
{
Name: "volume-1",
MountPath: "/volume-1",
},
},
expectedVolumePath: "/volume-1",
},
{
name: "fs mode specified",
volumeName: "volume-2",
volumeMode: corev1api.PersistentVolumeFilesystem,
expectedVolumeMount: []corev1api.VolumeMount{
{
Name: "volume-2",
MountPath: "/volume-2",
},
},
expectedVolumePath: "/volume-2",
},
{
name: "block volume mode specified",
volumeName: "volume-3",
volumeMode: corev1api.PersistentVolumeBlock,
expectedVolumeDevice: []corev1api.VolumeDevice{
{
Name: "volume-3",
DevicePath: "/volume-3",
},
},
expectedVolumePath: "/volume-3",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var volMode *corev1api.PersistentVolumeMode
if tc.volumeMode != "" {
volMode = &tc.volumeMode
}
mount, device, path := MakePodPVCAttachment(tc.volumeName, volMode)
assert.Equal(t, tc.expectedVolumeMount, mount)
assert.Equal(t, tc.expectedVolumeDevice, device)
assert.Equal(t, tc.expectedVolumePath, path)
})
}
}