Debug opt-in/opt-out PV backup filtering E2E tests

Signed-off-by: danfengl <danfengl@vmware.com>
pull/5524/head
danfengl 2022-10-21 04:25:39 +00:00
parent c24855129a
commit a411130256
10 changed files with 202 additions and 25 deletions

View File

@ -49,6 +49,6 @@ func RunCommand(cmd *exec.Cmd) (string, string, error) {
} else {
stderr = string(res)
}
cmd.Process.Kill()
return stdout, stderr, runErr
}

View File

@ -55,7 +55,7 @@ VELERO_VERSION ?= $(VERSION)
PLUGINS ?=
RESTORE_HELPER_IMAGE ?=
#Released version only
UPGRADE_FROM_VELERO_VERSION ?= v1.7.1,v1.8.1
UPGRADE_FROM_VELERO_VERSION ?= v1.8.1,v1.9.2
# UPGRADE_FROM_VELERO_CLI can have the same format (a comma-separated list) as UPGRADE_FROM_VELERO_VERSION
# Upgrade tests will be executed sequentially according to the list given by UPGRADE_FROM_VELERO_VERSION
# So although length of UPGRADE_FROM_VELERO_CLI list is not equal with UPGRADE_FROM_VELERO_VERSION

View File

@ -27,8 +27,8 @@ const POD_COUNT, VOLUME_COUNT_PER_POD = 2, 3
const OPT_IN_ANN, OPT_OUT_ANN = "backup.velero.io/backup-volumes", "backup.velero.io/backup-volumes-excludes"
const FILE_NAME = "test-data.txt"
var OptInPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_IN_ANN, id: "opt-in"})
var OptOutPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_OUT_ANN, id: "opt-out"})
var OptInPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_IN_ANN, id: "debug-opt-in"})
var OptOutPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_OUT_ANN, id: "debug-opt-out"})
func (p *PVBackupFiltering) Init() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)

View File

@ -20,6 +20,7 @@ import (
"context"
"flag"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
@ -177,10 +178,45 @@ func (t *TestCase) Restore() error {
}
By("Start to restore ......", func() {
Expect(VeleroRestoreExec(t.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.RestoreName, t.RestoreArgs)).To(Succeed(), func() string {
quitCh := make(chan struct{})
if strings.Contains(t.RestoreName, "-opt-") {
for _, ns := range *t.NSIncluded {
go func() {
for {
select {
case <-quitCh:
return
default:
}
fmt.Printf("start to get log for namespace %s ......", ns)
arg0 := []string{"-u"}
KubectlGetInfo("date", arg0)
arg := []string{"get", "all", "-n", ns}
KubectlGetInfo("kubectl", arg)
time.Sleep(5 * time.Second)
arg1 := []string{"get", "pvc", "-n", ns}
KubectlGetInfo("kubectl", arg1)
time.Sleep(5 * time.Second)
arg2 := []string{"get", "pv"}
KubectlGetInfo("kubectl", arg2)
time.Sleep(5 * time.Second)
arg3 := []string{"get", "events", "-o", "custom-columns=FirstSeen:.firstTimestamp,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message", "--all-namespaces"}
KubectlGetInfo("kubectl", arg3)
time.Sleep(20 * time.Second)
}
}()
}
}
var err error
if err = VeleroRestoreExec(t.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.RestoreName, t.RestoreArgs); err != nil {
RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", t.RestoreName)
return "Fail to restore workload"
})
}
close(quitCh)
Expect(err).To(BeNil())
// Expect(VeleroRestoreExec(t.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.RestoreName, t.RestoreArgs)).To(Succeed(), func() string {
// RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", t.RestoreName)
// return "Fail to restore workload"
// })
})
return nil
}

View File

@ -91,7 +91,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
UUIDgen, err = uuid.NewRandom()
Expect(err).To(Succeed())
oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
fmt.Println(veleroCLI2Version.VeleroCLI)
if veleroCLI2Version.VeleroCLI == "" {
//Assume tag of velero server image is identical to velero CLI version
//Download velero CLI if it's empty according to velero CLI version
@ -121,7 +121,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
Expect(CheckVeleroVersion(context.Background(), tmpCfgForOldVeleroInstall.VeleroCLI,
tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion)).To(Succeed())
})
time.Sleep(100000 * time.Minute)
backupName = "backup-" + UUIDgen.String()
restoreName = "restore-" + UUIDgen.String()
tmpCfg := VeleroCfg
@ -200,7 +200,17 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
tmpCfg.GCFrequency = ""
tmpCfg.UseNodeAgent = !useVolumeSnapshots
tmpCfg.UseRestic = false
Expect(VeleroInstall(context.Background(), &tmpCfg, useVolumeSnapshots)).To(Succeed())
tmpCfg.UploaderType = "restic"
tmpCfg.VeleroVersion = "main"
fmt.Println("tmpCfg.VeleroVersion")
fmt.Println(tmpCfg.VeleroVersion)
fmt.Println("tmpCfg.UploaderType")
fmt.Println(tmpCfg.UploaderType)
output, err := VeleroUpgrade(context.Background(), tmpCfg.VeleroCLI,
tmpCfg.VeleroNamespace, tmpCfg.VeleroVersion, tmpCfg.UploaderType)
fmt.Println(output)
time.Sleep(100000 * time.Minute)
Expect(err).To(Succeed())
Expect(CheckVeleroVersion(context.Background(), tmpCfg.VeleroCLI,
tmpCfg.VeleroVersion)).To(Succeed())
})

View File

@ -52,3 +52,68 @@ func GetListBy2Pipes(ctx context.Context, cmdline1, cmdline2, cmdline3 OsCommand
return ret, nil
}
// GetListBy5Pipes runs six commands chained through stdin/stdout pipes
// (cmdline1 | cmdline2 | ... | cmdline6) and returns the final stage's
// stdout split into lines.
//
// Unlike the original implementation, every Start/Wait error is checked and
// propagated instead of being silently discarded with `_ =`, so a failed
// stage no longer masquerades as an empty result; the debug Println calls
// for each command and for each output line have been removed.
func GetListBy5Pipes(ctx context.Context, cmdline1, cmdline2, cmdline3, cmdline4, cmdline5, cmdline6 OsCommandLine) ([]string, error) {
	var output bytes.Buffer

	// Only the first stage is bound to ctx, matching GetListBy2Pipes:
	// cancelling ctx kills the head of the pipeline, which in turn closes
	// the pipes and unblocks the downstream stages.
	cmds := []*exec.Cmd{
		exec.CommandContext(ctx, cmdline1.Cmd, cmdline1.Args...),
		exec.Command(cmdline2.Cmd, cmdline2.Args...),
		exec.Command(cmdline3.Cmd, cmdline3.Args...),
		exec.Command(cmdline4.Cmd, cmdline4.Args...),
		exec.Command(cmdline5.Cmd, cmdline5.Args...),
		exec.Command(cmdline6.Cmd, cmdline6.Args...),
	}

	// Wire each stage's stdout into the next stage's stdin.
	for i := 0; i < len(cmds)-1; i++ {
		stdout, err := cmds[i].StdoutPipe()
		if err != nil {
			return nil, err
		}
		cmds[i+1].Stdin = stdout
	}
	cmds[len(cmds)-1].Stdout = &output

	// Start downstream stages first (same order as the original), so every
	// consumer is ready before the head starts producing.
	for i := len(cmds) - 1; i >= 0; i-- {
		if err := cmds[i].Start(); err != nil {
			return nil, err
		}
	}
	// Wait upstream-first so each stage's stdout is fully drained before
	// its consumer is reaped.
	for _, c := range cmds {
		if err := c.Wait(); err != nil {
			return nil, err
		}
	}

	scanner := bufio.NewScanner(&output)
	var ret []string
	for scanner.Scan() {
		ret = append(ret, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return ret, nil
}

View File

@ -282,3 +282,14 @@ func ReadFileFromPodVolume(ctx context.Context, namespace, podName, volume, file
fmt.Print(stderr)
return stdout, err
}
// KubectlGetInfo executes the given command with the given arguments and
// prints the command line plus its stdout; on failure it also prints the
// command's stderr and the error. It is a best-effort debug helper: any
// failure is logged, never returned.
func KubectlGetInfo(cmdName string, arg []string) {
	infoCmd := exec.CommandContext(context.Background(), cmdName, arg...)
	fmt.Printf("Kubectl exec cmd =%v\n", infoCmd)
	stdout, stderr, runErr := veleroexec.RunCommand(infoCmd)
	fmt.Println(stdout)
	if runErr == nil {
		return
	}
	fmt.Println(stderr)
	fmt.Println(runErr)
}

View File

@ -174,7 +174,8 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler
}
func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
kibishiiGenerateCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
timeout, _ := context.WithTimeout(context.Background(), time.Minute*10)
kibishiiGenerateCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
"/usr/local/bin/generate.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),
strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum), strconv.Itoa(kibishiiData.ExpectedNodes))
@ -189,7 +190,7 @@ func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiD
}
func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
timeout, _ := context.WithTimeout(context.Background(), time.Minute*5)
timeout, _ := context.WithTimeout(context.Background(), time.Minute*10)
kibishiiVerifyCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
"/usr/local/bin/verify.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),

View File

@ -103,6 +103,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *VerleroConfig, useVolumeSnaps
RestoreHelperImage: veleroCfg.RestoreHelperImage,
})
if err != nil {
RunDebug(context.Background(), veleroCfg.VeleroCLI, veleroCfg.VeleroNamespace, "", "")
return errors.WithMessagef(err, "Failed to install Velero in the cluster")
}
@ -394,7 +395,7 @@ func waitVeleroReady(ctx context.Context, namespace string, useNodeAgent bool) e
if useNodeAgent {
fmt.Println("Waiting for node-agent daemonset to be ready.")
err := wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) {
err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
stdout, stderr, err := velerexec.RunCommand(exec.CommandContext(ctx, "kubectl", "get", "daemonset/node-agent",
"-o", "json", "-n", namespace))
if err != nil {
@ -410,6 +411,21 @@ func waitVeleroReady(ctx context.Context, namespace string, useNodeAgent bool) e
return false, nil
})
if err != nil {
ns := namespace
fmt.Printf("start to get log for namespace %s ......", ns)
arg0 := []string{"-u"}
KubectlGetInfo("date", arg0)
arg := []string{"get", "all", "-n", ns}
KubectlGetInfo("kubectl", arg)
time.Sleep(5 * time.Second)
arg1 := []string{"get", "pvc", "-n", ns}
KubectlGetInfo("kubectl", arg1)
time.Sleep(5 * time.Second)
arg2 := []string{"get", "pv"}
KubectlGetInfo("kubectl", arg2)
time.Sleep(5 * time.Second)
arg3 := []string{"get", "events", "-o", "custom-columns=FirstSeen:.firstTimestamp,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message", "--all-namespaces"}
KubectlGetInfo("kubectl", arg3)
return errors.Wrap(err, "fail to wait for the node-agent ready")
}
}

View File

@ -473,6 +473,9 @@ func RunDebug(ctx context.Context, veleroCLI, veleroNamespace, backup, restore s
if err := VeleroCmdExec(ctx, veleroCLI, args); err != nil {
fmt.Println(errors.Wrapf(err, "failed to run the debug command"))
}
if strings.Contains(restore, "-opt-") || strings.Contains(backup, "-opt-") {
time.Sleep(24 * 60 * time.Minute)
}
}
func VeleroCreateBackupLocation(ctx context.Context,
@ -706,23 +709,30 @@ func CheckVeleroVersion(ctx context.Context, veleroCLI string, expectedVer strin
}
func InstallVeleroCLI(version string) (string, error) {
var tempVeleroCliDir string
name := "velero-" + version + "-" + runtime.GOOS + "-" + runtime.GOARCH
postfix := ".tar.gz"
tarball := name + postfix
tempFile, err := getVeleroCliTarball("https://github.com/vmware-tanzu/velero/releases/download/" + version + "/" + tarball)
if err != nil {
return "", errors.WithMessagef(err, "failed to get Velero CLI tarball")
}
tempVeleroCliDir, err := ioutil.TempDir("", "velero-test")
if err != nil {
return "", errors.WithMessagef(err, "failed to create temp dir for tarball extraction")
}
err := wait.PollImmediate(time.Second*5, time.Minute*5, func() (bool, error) {
tempFile, err := getVeleroCliTarball("https://github.com/vmware-tanzu/velero/releases/download/" + version + "/" + tarball)
if err != nil {
return false, errors.WithMessagef(err, "failed to get Velero CLI tarball")
}
tempVeleroCliDir, err = ioutil.TempDir("", "velero-test")
if err != nil {
return false, errors.WithMessagef(err, "failed to create temp dir for tarball extraction")
}
cmd := exec.Command("tar", "-xvf", tempFile.Name(), "-C", tempVeleroCliDir)
defer os.Remove(tempFile.Name())
cmd := exec.Command("tar", "-xvf", tempFile.Name(), "-C", tempVeleroCliDir)
defer os.Remove(tempFile.Name())
if _, err := cmd.Output(); err != nil {
return "", errors.WithMessagef(err, "failed to extract file from velero CLI tarball")
if _, err := cmd.Output(); err != nil {
return false, errors.WithMessagef(err, "failed to extract file from velero CLI tarball")
}
return true, nil
})
if err != nil {
return "", errors.WithMessagef(err, "failed to install velero CLI")
}
return tempVeleroCliDir + "/" + name + "/velero", nil
}
@ -1081,3 +1091,31 @@ func GetSchedule(ctx context.Context, veleroNamespace, scheduleName string) (str
}
return stdout, err
}
// VeleroUpgrade upgrades an installed velero deployment in place by piping
// `kubectl get deploy` through a chain of sed filters (bumping the server
// image tag, rewriting the restic flags to the fs-backup equivalents with
// the requested uploader type, and renaming restic-timeout) and applying the
// result back with `kubectl apply -f -`. It returns the pipeline's output
// lines.
func VeleroUpgrade(ctx context.Context, veleroCLI, veleroNamespace, toVeleroVersion, uploaderType string) ([]string, error) {
	getDeploy := common.OsCommandLine{
		Cmd:  "kubectl",
		Args: []string{"get", "deploy", "-n", veleroNamespace},
	}
	bumpImage := common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{fmt.Sprintf("s#\"image\"\\: \"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]*\"#\"image\"\\: \"velero\\/velero\\:%s\"#g", toVeleroVersion)},
	}
	swapUploaderFlags := common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{fmt.Sprintf("s#\"--default-volumes-to-restic=true\"#\"--default-volumes-to-fs-backup=true\",\"--uploader-type=%s\"#g", uploaderType)},
	}
	bumpImageAgain := common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{fmt.Sprintf("s#\"image\"\\: \"velero\\/velero\\:v[0-9]*.[0-9]*.[0-9]*\"#\"image\"\\: \"velero\\/velero\\:%s\"#g", toVeleroVersion)},
	}
	renameTimeout := common.OsCommandLine{
		Cmd:  "sed",
		Args: []string{"s#restic-timeout#fs-backup-timeout#g"},
	}
	applyManifest := common.OsCommandLine{
		Cmd:  "kubectl",
		Args: []string{"apply", "-f", "-"},
	}
	return common.GetListBy5Pipes(ctx, getDeploy, bumpImage, swapUploaderFlags, bumpImageAgain, renameTimeout, applyManifest)
}