Less Retry on minikube, new auto fail helpers MustStart,MustRun

pull/5150/head
Medya Gh 2019-08-21 11:19:03 -07:00
parent 8796539112
commit 246f33f52b
17 changed files with 67 additions and 92 deletions

View File

@ -52,7 +52,7 @@ func TestDownloadOnly(t *testing.T) {
minHome := constants.GetMinipath()
for _, v := range []string{constants.OldestKubernetesVersion, constants.NewestKubernetesVersion} {
mk.StartWithFail("--download-only", fmt.Sprintf("--kubernetes-version=%s", v))
mk.MustStart("--download-only", fmt.Sprintf("--kubernetes-version=%s", v))
// checking if cached images are downloaded for example (kube-apiserver_v1.15.2, kube-scheduler_v1.15.2, ...)
_, imgs := constants.GetKubeadmCachedImages("", v)
for _, img := range imgs {

View File

@ -48,9 +48,9 @@ func testGvisorRestart(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
mk.StartWithFail("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.RunCommand("cache add gcr.io/k8s-minikube/gvisor-addon:latest", true)
mk.RunCommand("addons enable gvisor", true)
mk.MustStart("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.MustRun("cache add gcr.io/k8s-minikube/gvisor-addon:latest")
mk.MustRun("addons enable gvisor")
t.Log("waiting for gvisor controller to come up")
if err := waitForGvisorControllerRunning(p); err != nil {
@ -64,8 +64,8 @@ func testGvisorRestart(t *testing.T) {
}
deleteUntrustedWorkload(t, p)
mk.RunCommand("delete", true)
mk.StartWithFail("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.MustRun("delete")
mk.MustStart("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.CheckStatus(state.Running.String())
t.Log("waiting for gvisor controller to come up")

View File

@ -46,7 +46,7 @@ func TestDocker(t *testing.T) {
t.Logf("pre-delete failed (probably ok): %v", err)
}
mk.StartWithFail("--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", " --docker-opt=icc=true")
mk.MustStart("--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", " --docker-opt=icc=true")
mk.CheckStatus(state.Running.String())

View File

@ -128,7 +128,7 @@ func testIngressController(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
kr := util.NewKubectlRunner(t, p)
mk.RunCommand("addons enable ingress", true)
mk.MustRun("addons enable ingress")
if err := waitForIngressControllerRunning(p); err != nil {
t.Fatalf("Failed waiting for ingress-controller to be up: %v", err)
}
@ -168,7 +168,7 @@ func testIngressController(t *testing.T) {
}
}
}()
mk.RunCommand("addons disable ingress", true)
mk.MustRun("addons disable ingress")
}
func testServicesList(t *testing.T) {
@ -177,7 +177,10 @@ func testServicesList(t *testing.T) {
mk := NewMinikubeRunner(t, p)
checkServices := func() error {
output, stderr := mk.RunCommand("service list", false)
output, stderr, err := mk.RunCommand("service list", false)
if err != nil {
return err
}
if !strings.Contains(output, "kubernetes") {
return fmt.Errorf("error, kubernetes service missing from output: %s, \n stderr: %s", output, stderr)
}
@ -191,7 +194,7 @@ func testRegistry(t *testing.T) {
t.Parallel()
p := profileName(t)
mk := NewMinikubeRunner(t, p)
mk.RunCommand("addons enable registry", true)
mk.MustRun("addons enable registry")
client, err := kapi.Client(p)
if err != nil {
t.Fatalf("getting kubernetes client: %v", err)
@ -210,7 +213,7 @@ func testRegistry(t *testing.T) {
if err := kapi.WaitForPodsWithLabelRunning(client, "kube-system", ps); err != nil {
t.Fatalf("waiting for registry-proxy pods: %v", err)
}
ip, stderr := mk.RunCommand("ip", true)
ip, stderr := mk.MustRun("ip")
ip = strings.TrimSpace(ip)
endpoint := fmt.Sprintf("http://%s:%d", ip, 5000)
u, err := url.Parse(endpoint)
@ -259,7 +262,7 @@ func testRegistry(t *testing.T) {
t.Errorf("failed to delete pod registry-test")
}
}()
mk.RunCommand("addons disable registry", true)
mk.MustRun("addons disable registry")
}
// waitForNginxRunning waits for nginx service to be up

View File

@ -34,7 +34,7 @@ func testClusterEnv(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
// Set a specific shell syntax so that we don't have to handle every possible user shell
envOut, stderr := mk.RunCommand("docker-env --shell=bash", true)
envOut, stderr := mk.MustRun("docker-env --shell=bash")
vars := mk.ParseEnvCmdOutput(envOut)
if len(vars) == 0 {
t.Fatalf("Failed to parse env vars:\n%s, \n stderr: %s ", envOut, stderr)

View File

@ -27,13 +27,13 @@ func testClusterLogs(t *testing.T) {
t.Parallel()
p := profileName(t)
mk := NewMinikubeRunner(t, p)
logsCmdOutput := mk.GetLogs()
logsCmdStdout, _ := mk.GetLogs()
// check for # of lines or check for strings
logWords := []string{"minikube", ".go"}
for _, logWord := range logWords {
if !strings.Contains(logsCmdOutput, logWord) {
t.Fatalf("Error in logsCmdOutput, expected to find: %s. Output: %s", logWord, logsCmdOutput)
if !strings.Contains(logsCmdStdout, logWord) {
t.Fatalf("Error in logsCmdOutput, expected to find: %s. Output: %s", logWord, logsCmdStdout)
}
}
}

View File

@ -28,7 +28,7 @@ func testClusterSSH(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
expectedStr := "hello"
sshCmdOutput, stderr := mk.RunCommand("ssh echo "+expectedStr, true)
sshCmdOutput, stderr := mk.MustRun("ssh echo " + expectedStr)
if !strings.Contains(sshCmdOutput, expectedStr) {
t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s Stderr: %s", expectedStr, sshCmdOutput, stderr)
}

View File

@ -28,7 +28,7 @@ func testProfileList(t *testing.T) {
p := profileName(t)
t.Parallel()
mk := NewMinikubeRunner(t, p, "--wait=false")
out, stderr := mk.RunCommand("profile list", true)
out, stderr := mk.MustRun("profile list")
if !strings.Contains(out, p) {
t.Errorf("Error , failed to read profile name (%s) in `profile list` command output : \n %q : \n stderr: %s ", p, out, stderr)
}

View File

@ -49,7 +49,7 @@ func testTunnel(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
go func() {
output, stderr := mk.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
output, stderr := mk.MustRun("tunnel --alsologtostderr -v 8 --logtostderr")
if t.Failed() {
t.Errorf("tunnel stderr : %s", stderr)
t.Errorf("tunnel output : %s", output)

View File

@ -25,7 +25,7 @@ import (
func TestFunctional(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p)
mk.StartWithFail()
mk.MustStart()
if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
defer mk.TearDown(t)
}

View File

@ -29,10 +29,9 @@ func TestISO(t *testing.T) {
if shouldRunInParallel(t) {
t.Parallel()
}
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
stdout, stderr := mk.StartWithFail()
mk.RunCommand("delete", false) // errors if the cluster does not exist; the error is intentionally ignored.
stdout, stderr := mk.MustStart()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}

View File

@ -47,7 +47,7 @@ func TestNone(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
stdout, stderr := mk.StartWithFail()
stdout, stderr := mk.MustStart()
msg := "Configuring local host environment"
if !strings.Contains(stdout, msg) {
t.Errorf("Expected: stdout to contain %q, got: %s", msg, stdout)

View File

@ -38,7 +38,7 @@ func TestPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
mk.StartWithFail()
mk.MustStart()
kr := util.NewKubectlRunner(t, p)
if _, err := kr.RunCommand([]string{"create", "-f", filepath.Join(*testdataDir, "busybox.yaml")}); err != nil {
t.Fatalf("creating busybox pod: %s", err)
@ -52,10 +52,10 @@ func TestPersistence(t *testing.T) {
// Make sure everything is up before we stop.
verifyBusybox(t)
mk.RunCommand("stop", true)
mk.MustRun("stop")
mk.CheckStatus(state.Stopped.String())
mk.StartWithFail()
mk.MustStart()
mk.CheckStatus(state.Running.String())
// Make sure the same things come up after we've restarted.

View File

@ -23,11 +23,9 @@ import (
"net"
"strings"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/util/retry"
)
func TestStartStop(t *testing.T) {
@ -86,38 +84,25 @@ func TestStartStop(t *testing.T) {
t.Skipf("skipping %s - incompatible with none driver", t.Name())
}
mk.RunCommand("config set WantReportErrorPrompt false", true)
mk.StartWithFail(tc.args...)
mk.MustRun("config set WantReportErrorPrompt false")
mk.MustStart(tc.args...)
mk.CheckStatus(state.Running.String())
ip, stderr := mk.RunCommand("ip", true)
ip, stderr := mk.MustRun("ip")
ip = strings.TrimRight(ip, "\n")
if net.ParseIP(ip) == nil {
t.Fatalf("IP command returned an invalid address: %s \n %s", ip, stderr)
}
stop := func() error {
_, _, err := mk.RunCommandRetriable("stop", true)
if err != nil {
t.Errorf("minikube stop error %v ( will retry up to 3 times) ", err)
}
err = mk.CheckStatusNoFail(state.Stopped.String())
if err != nil {
t.Errorf("expected status to be stoped but got error %v ", err)
}
return err
}
err := retry.Expo(stop, 10*time.Second, 5*time.Minute, 3) // max retry 3
mk.MustRun("stop")
err := mk.CheckStatusNoFail(state.Stopped.String())
if err != nil {
t.Errorf("expected status to be stoped but got error: %v ", err)
t.Errorf("expected status to be %s but got error %v ", state.Stopped.String(), err)
}
mk.CheckStatus(state.Stopped.String())
mk.StartWithFail(tc.args...)
mk.MustStart(tc.args...)
mk.CheckStatus(state.Running.String())
mk.RunCommand("delete", true)
mk.MustRun("delete")
mk.CheckStatus(state.None.String())
})
}

View File

@ -98,8 +98,17 @@ func (m *MinikubeRunner) teeRun(cmd *exec.Cmd, waitForRun ...bool) (string, stri
return "", "", err
}
// MustRun executes a command and fails the test on error. Unless
// waitForRun is set to false, it waits for the command to finish.
// It returns the command's stdout and stderr.
func (m *MinikubeRunner) MustRun(cmdStr string, waitForRun ...bool) (string, string) {
	// failError=true makes RunCommand fail the test itself; the log below
	// adds context for the test output.
	stdout, stderr, err := m.RunCommand(cmdStr, true, waitForRun...)
	if err != nil {
		m.T.Logf("MustRun error: %v", err)
	}
	return stdout, stderr
}
// RunCommand executes a command, optionally checking for error and by default waits for run to finish
func (m *MinikubeRunner) RunCommand(cmdStr string, failError bool, waitForRun ...bool) (string, string) {
func (m *MinikubeRunner) RunCommand(cmdStr string, failError bool, waitForRun ...bool) (string, string, error) {
profileArg := fmt.Sprintf("-p=%s ", m.Profile)
cmdStr = profileArg + cmdStr
cmdArgs := strings.Split(cmdStr, " ")
@ -109,39 +118,17 @@ func (m *MinikubeRunner) RunCommand(cmdStr string, failError bool, waitForRun ..
Logf("Run: %s", cmd.Args)
stdout, stderr, err := m.teeRun(cmd, waitForRun...)
if err != nil {
errMsg := ""
exitCode := ""
if exitError, ok := err.(*exec.ExitError); ok {
errMsg = fmt.Sprintf("Error RunCommand: %q : %q. Output: %q Stderr: %q", cmdStr, exitError.Stderr, stdout, stderr)
} else {
errMsg = fmt.Sprintf("Error RunCommand: %q : %q. Output: %q", cmdStr, stderr, stdout)
exitCode = string(exitError.Stderr)
}
errMsg := fmt.Sprintf("Error RunCommand : %s \n\t Begin RunCommand log block ---> \n\t With Profile: %s \n\t With ExitCode: %q \n\t With STDOUT %s \n\t With STDERR %s \n\t <--- End of RunCommand log block", cmdStr, m.Profile, exitCode, stdout, stderr)
if failError {
m.T.Fatalf(errMsg)
} else {
m.T.Errorf(errMsg)
}
}
return stdout, stderr
}
// RunCommandRetriable executes a command, returns error
// the purpose of this command is to make it retriable by returning error
func (m *MinikubeRunner) RunCommandRetriable(cmdStr string, waitForRun ...bool) (stdout string, stderr string, err error) {
profileArg := fmt.Sprintf("-p=%s ", m.Profile)
cmdStr = profileArg + cmdStr
cmdArgs := strings.Split(cmdStr, " ")
path, _ := filepath.Abs(m.BinaryPath)
cmd := exec.Command(path, cmdArgs...)
Logf("RunCommandRetriable: %s", cmd.Args)
stdout, stderr, err = m.teeRun(cmd, waitForRun...)
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
m.T.Logf("temporary error: running command: %s %s. Output: \n%s", cmdStr, exitError.Stderr, stdout)
} else {
m.T.Logf("temporary error: running command: %s %s. Output: \n%s", cmdStr, stderr, stdout)
}
}
return stdout, stderr, err
}
@ -237,11 +224,11 @@ func (m *MinikubeRunner) start(opts ...string) (stdout string, stderr string, er
}
// StartWithFail starts the cluster and fail the test if error
func (m *MinikubeRunner) StartWithFail(opts ...string) (stdout string, stderr string) {
func (m *MinikubeRunner) MustStart(opts ...string) (stdout string, stderr string) {
stdout, stderr, err := m.start(opts...)
// The reason for this formatting: the logs are very big but useful, and in parallel testing they are harder to attribute to a test.
if err != nil {
m.T.Fatalf("%s Failed to start minikube With error: %v \n\t Start log block ---> \n\t With Profile: %s \n\t With Args: %v \n\t With Global Args: %s \n\t With Driver Args: %s \n\t With STDOUT: \n \t %s \n\t With STDERR: \n \t %s \n\t <--- End of log block", m.T.Name(), err, m.Profile, strings.Join(opts, " "), m.GlobalArgs, m.StartArgs, stdout, stderr)
m.T.Fatalf("%s Failed to start minikube With error: %v \n\t begin Start log block ---> \n\t With Profile: %s \n\t With Args: %v \n\t With Global Args: %s \n\t With Driver Args: %s \n\t With STDOUT: \n \t %s \n\t With STDERR: \n \t %s \n\t <--- End of start log block", m.T.Name(), err, m.Profile, strings.Join(opts, " "), m.GlobalArgs, m.StartArgs, stdout, stderr)
}
return stdout, stderr
}
@ -284,24 +271,25 @@ func (m *MinikubeRunner) ParseEnvCmdOutput(out string) map[string]string {
// Status returns the status of a service
func (m *MinikubeRunner) Status() (status string, stderr string, err error) {
cmd := fmt.Sprintf("status --format={{.Host}} %s", m.GlobalArgs)
s := func() error {
status, stderr, err = m.RunCommandRetriable(cmd)
status, stderr, err = m.RunCommand("status --format={{.Host}} %s", false)
status = strings.TrimRight(status, "\n")
if err != nil && (status == state.None.String() || status == state.Stopped.String()) {
err = nil // because https://github.com/kubernetes/minikube/issues/4932
}
return err
}
err = retry.Expo(s, 3*time.Second, 2*time.Minute)
if err != nil && (status == state.None.String() || status == state.Stopped.String()) {
err = nil // because https://github.com/kubernetes/minikube/issues/4932
}
return status, stderr, err
}
// GetLogs returns the logs of a service
func (m *MinikubeRunner) GetLogs() string {
// TODO: this test needs to check sterr too !
stdout, _ := m.RunCommand(fmt.Sprintf("logs %s", m.GlobalArgs), true)
return stdout
// GetLogs runs the "logs" command with the runner's global args and
// returns its stdout and stderr. Any error is reported through the
// test logger (RunCommand is invoked with failError=true).
func (m *MinikubeRunner) GetLogs() (string, string) {
stdout, stderr, err := m.RunCommand(fmt.Sprintf("logs %s", m.GlobalArgs), true)
if err != nil {
m.T.Logf("Error in GetLogs %v", err)
}
return stdout, stderr
}
// CheckStatus makes sure the service has the desired status, or cause fatal error

View File

@ -83,14 +83,14 @@ func TestVersionUpgrade(t *testing.T) {
mkRelease.StartArgs = strings.Replace(mkRelease.StartArgs, "--wait-timeout=13m", "", 1)
mkRelease.BinaryPath = fname
// For full coverage: also test upgrading from oldest to newest supported k8s release
mkRelease.StartWithFail(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
mkRelease.MustStart(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
mkRelease.CheckStatus(state.Running.String())
mkRelease.RunCommand("stop", true)
mkRelease.MustRun("stop")
mkRelease.CheckStatus(state.Stopped.String())
// Trim the leading "v" prefix to assert that we handle it properly.
mkHead.StartWithFail(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
mkHead.MustStart(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
mkHead.CheckStatus(state.Running.String())
}

View File

@ -112,7 +112,7 @@ func TestProxy(t *testing.T) {
func testProxyWarning(t *testing.T) {
p := profileName(t) // profile name
mk := NewMinikubeRunner(t, p)
stdout, stderr := mk.StartWithFail("--wait=false")
stdout, stderr := mk.MustStart("--wait=false")
msg := "Found network options:"
if !strings.Contains(stdout, msg) {