Unified StartWithFail logging, more t.Error usage, and added a download-only test

pull/5150/head
Medya Gh 2019-08-21 08:45:17 -07:00
parent 5f0b0c1b90
commit eef31403b4
11 changed files with 69 additions and 64 deletions

View File

@ -43,22 +43,44 @@ func TestDownloadOnly(t *testing.T) {
if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
defer mk.TearDown(t)
}
minHome := constants.GetMinipath()
t.Run("Oldest", func(t *testing.T) {
stdout, stderr, err := mk.Start("--download-only", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
if err != nil {
t.Errorf("%s minikube --download-only failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
}
mk.StartWithFail("--download-only", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
})
t.Run("Newest", func(t *testing.T) {
stdout, stderr, err := mk.Start("--download-only", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion))
if err != nil {
t.Errorf("%s minikube --download-only failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
v := constants.NewestKubernetesVersion
mk.StartWithFail("--download-only", fmt.Sprintf("--kubernetes-version=%s", v))
// checking images downloaded
_, imgs := constants.GetKubeadmCachedImages("", v)
for _, img := range imgs {
_, err := os.Stat(filepath.Join(minHome, fmt.Sprintf("images/%s", img)))
if err != nil {
t.Errorf("expected download-only to cache image %q, but got error: %v", img, err)
}
}
// TODO: add test to check if files are downloaded
// checking binaries downloaded (kubelet,kubeadm)
for _, bin := range constants.GetKubeadmCachedBinaries() {
_, err := os.Stat(filepath.Join(minHome, fmt.Sprintf("cache/%s/%s", v, bin)))
if err != nil {
t.Errorf("expected download-only to cache binary %q, but got error: %v", bin, err)
}
}
// checking binaries downloaded
for _, bin := range []string{"kubelet", "kubeadm"} {
_, err := os.Stat(filepath.Join(minHome, fmt.Sprintf("cache/%s/%s", v, bin)))
if err != nil {
t.Errorf("expected download-only to cache binary %q, but got error: %v", bin, err)
}
}
})
// this downloads the latest released binary from the location where minikube binaries are published
t.Run("DownloadLatestRelease", func(t *testing.T) {
dest := filepath.Join(*testdataDir, fmt.Sprintf("minikube-%s-%s-latest-stable", runtime.GOOS, runtime.GOARCH))
err := downloadMinikubeBinary(t, dest, "latest")

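For reference, the cache layout asserted in the Newest subtest above can be probed with a small standalone check. A minimal sketch, assuming the default ~/.minikube home directory and hardcoding an illustrative version and image/binary names (the test itself resolves these via constants.GetMinipath, constants.GetKubeadmCachedImages, and constants.GetKubeadmCachedBinaries):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Assumed default minikube home; the test uses constants.GetMinipath() instead.
	minHome := filepath.Join(os.Getenv("HOME"), ".minikube")
	v := "v1.15.2" // hypothetical --kubernetes-version value, for illustration only

	// Cached images are expected under <minikube home>/images/<image>,
	// mirroring the filepath.Join in the subtest above.
	for _, img := range []string{"k8s.gcr.io/kube-apiserver:" + v} {
		if _, err := os.Stat(filepath.Join(minHome, "images", img)); err != nil {
			fmt.Printf("image %q not cached: %v\n", img, err)
		}
	}

	// Cached binaries are expected under <minikube home>/cache/<version>/<binary>.
	for _, bin := range []string{"kubelet", "kubeadm"} {
		if _, err := os.Stat(filepath.Join(minHome, "cache", v, bin)); err != nil {
			fmt.Printf("binary %q not cached: %v\n", bin, err)
		}
	}
}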
View File

@ -48,11 +48,7 @@ func testGvisorRestart(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
stdout, stderr, err := mk.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
}
mk.StartWithFail("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.RunCommand("cache add gcr.io/k8s-minikube/gvisor-addon:latest", true)
mk.RunCommand("addons enable gvisor", true)
@ -69,10 +65,7 @@ func testGvisorRestart(t *testing.T) {
deleteUntrustedWorkload(t, p)
mk.RunCommand("delete", true)
stdout, stderr, err = mk.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v \nstdout: %s \nstderr: %s", t.Name(), err, stdout, stderr)
}
mk.StartWithFail("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
mk.CheckStatus(state.Running.String())
t.Log("waiting for gvisor controller to come up")

View File

@ -46,14 +46,11 @@ func TestDocker(t *testing.T) {
t.Logf("pre-delete failed (probably ok): %v", err)
}
stdout, stderr, err := mk.Start("--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", " --docker-opt=icc=true")
if err != nil {
t.Fatalf("TestDocker minikube start failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mk.StartWithFail("--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", " --docker-opt=icc=true")
mk.CheckStatus(state.Running.String())
stdout, stderr, err = mk.RunWithContext(ctx, "ssh -- systemctl show docker --property=Environment --no-pager")
stdout, stderr, err := mk.RunWithContext(ctx, "ssh -- systemctl show docker --property=Environment --no-pager")
if err != nil {
t.Errorf("docker env: %v\nstderr: %s", err, stderr)
}

View File

@ -25,10 +25,7 @@ import (
func TestFunctional(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p)
stdout, stderr, err := mk.Start()
if err != nil {
t.Fatalf("failed to start minikube failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mk.StartWithFail()
if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
defer mk.TearDown(t)
}

View File

@ -32,7 +32,7 @@ func TestISO(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
stdout, stderr, err := mk.Start()
stdout, stderr := mk.StartWithFail()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}

View File

@ -47,7 +47,7 @@ func TestNone(t *testing.T) {
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
stdout, stderr, err := mk.Start()
stdout, stderr := mk.StartWithFail()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
}

View File

@ -38,10 +38,7 @@ func TestPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
stdout, stderr, err := mk.Start()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
mk.StartWithFail()
kr := util.NewKubectlRunner(t, p)
if _, err := kr.RunCommand([]string{"create", "-f", filepath.Join(*testdataDir, "busybox.yaml")}); err != nil {
t.Fatalf("creating busybox pod: %s", err)
@ -58,10 +55,7 @@ func TestPersistence(t *testing.T) {
mk.RunCommand("stop", true)
mk.CheckStatus(state.Stopped.String())
stdout, stderr, err = mk.Start()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
mk.StartWithFail()
mk.CheckStatus(state.Running.String())
// Make sure the same things come up after we've restarted.

View File

@ -87,10 +87,7 @@ func TestStartStop(t *testing.T) {
}
mk.RunCommand("config set WantReportErrorPrompt false", true)
stdout, stderr, err := mk.Start(tc.args...)
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", pn, err, stdout, stderr)
}
mk.StartWithFail(tc.args...)
mk.CheckStatus(state.Running.String())
@ -101,20 +98,25 @@ func TestStartStop(t *testing.T) {
}
stop := func() error {
stdout, stderr, err = mk.RunCommandRetriable("stop")
return mk.CheckStatusNoFail(state.Stopped.String())
_, _, err := mk.RunCommandRetriable("stop", true)
if err != nil {
t.Errorf("minikube stop failed: %v (will retry up to 3 times)", err)
}
err = mk.CheckStatusNoFail(state.Stopped.String())
if err != nil {
t.Errorf("expected status to be Stopped, but got error: %v", err)
}
return err
}
err = retry.Expo(stop, 10*time.Second, 5*time.Minute)
mk.CheckStatus(state.Stopped.String())
stdout, stderr, err = mk.Start(tc.args...)
err := retry.Expo(stop, 10*time.Second, 5*time.Minute, 3) // max 3 retries
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
t.Errorf("expected status to be Stopped, but got error: %v", err)
}
mk.CheckStatus(state.Stopped.String())
mk.StartWithFail(tc.args...)
mk.CheckStatus(state.Running.String())
mk.RunCommand("delete", true)
mk.CheckStatus(state.None.String())
})
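The stop closure above is driven by retry.Expo with an initial 10-second wait, a 5-minute budget, and at most 3 attempts. As a rough standalone sketch of that pattern (not the actual pkg/util/retry implementation, whose signature and jitter handling may differ):

package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry is a generic retry-with-exponential-backoff loop in the spirit
// of the retry.Expo call above; the real minikube helper may differ.
func expoRetry(fn func() error, initialWait, maxTotal time.Duration, maxRetries int) error {
	var err error
	wait := initialWait
	deadline := time.Now().Add(maxTotal)
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		if time.Now().Add(wait).After(deadline) {
			break // the next wait would exceed the overall time budget
		}
		time.Sleep(wait)
		wait *= 2 // back off exponentially between attempts
	}
	return err
}

func main() {
	calls := 0
	stop := func() error {
		calls++
		if calls < 2 {
			return errors.New("not stopped yet")
		}
		return nil
	}
	if err := expoRetry(stop, 10*time.Millisecond, time.Second, 3); err != nil {
		fmt.Println("stop failed:", err)
	}
}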

View File

@ -228,13 +228,21 @@ func (m *MinikubeRunner) SSH(cmdStr string) (string, error) {
}
// Start starts the cluster
func (m *MinikubeRunner) Start(opts ...string) (stdout string, stderr string, err error) {
func (m *MinikubeRunner) start(opts ...string) (stdout string, stderr string, err error) {
cmd := fmt.Sprintf("start %s %s %s", m.StartArgs, m.GlobalArgs, strings.Join(opts, " "))
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, m.TimeOutStart)
defer cancel()
stdout, stderr, err = m.RunWithContext(ctx, cmd, true)
return stdout, stderr, err
return m.RunWithContext(ctx, cmd, true)
}
// StartWithFail starts the cluster and fails the test on error
func (m *MinikubeRunner) StartWithFail(opts ...string) (stdout string, stderr string) {
stdout, stderr, err := m.start(opts...)
if err != nil {
m.T.Fatalf("%s: failed to start minikube (for profile %s): %v\n\twith opts: %s\n\tGlobal Args: %s\n\tDriver Args: %s\n\tSTDOUT:\n\t\t%s\n\tSTDERR:\n\t\t%s", m.T.Name(), m.Profile, err, strings.Join(opts, " "), m.GlobalArgs, m.StartArgs, stdout, stderr)
}
return stdout, stderr
}
// TearDown deletes minikube without waiting for it. Used to free up RAM/CPU after each test.
@ -255,7 +263,7 @@ func (m *MinikubeRunner) EnsureRunning(opts ...string) {
m.T.Errorf("error getting status for ensure running: %v", err)
}
if s != state.Running.String() {
stdout, stderr, err := m.Start(opts...)
stdout, stderr, err := m.start(opts...)
if err != nil {
m.T.Errorf("error starting while running EnsureRunning : %v , stdout %s stderr %s", err, stdout, stderr)
}
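The renamed start plus the new StartWithFail wrapper consolidate the per-test Fatalf boilerplate into one place. A minimal sketch of the same pattern as a plain helper function (hypothetical names, not the actual MinikubeRunner API):

package util

import (
	"os/exec"
	"strings"
	"testing"
)

// runOrFail runs a command and fails the calling test immediately on error,
// so each test no longer repeats its own stdout/stderr Fatalf logging.
// Hypothetical simplification of the StartWithFail idea above.
func runOrFail(t *testing.T, binary string, args ...string) string {
	t.Helper() // report the caller's file:line on failure
	out, err := exec.Command(binary, args...).CombinedOutput()
	if err != nil {
		t.Fatalf("%s: %s %s failed: %v\noutput:\n%s",
			t.Name(), binary, strings.Join(args, " "), err, out)
	}
	return string(out)
}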

View File

@ -83,19 +83,14 @@ func TestVersionUpgrade(t *testing.T) {
mkRelease.StartArgs = strings.Replace(mkRelease.StartArgs, "--wait-timeout=13m", "", 1)
mkRelease.BinaryPath = fname
// For full coverage: also test upgrading from oldest to newest supported k8s release
stdout, stderr, err := mkRelease.Start(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
if err != nil {
t.Fatalf("minikube start (%s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
mkRelease.StartWithFail(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
mkRelease.CheckStatus(state.Running.String())
mkRelease.RunCommand("stop", true)
mkRelease.CheckStatus(state.Stopped.String())
// Trim the leading "v" prefix to assert that we handle it properly.
stdout, stderr, err = mkHead.Start(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
if err != nil {
t.Fatalf("TestVersionUpgrade mkCurrent.Start start failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mkHead.StartWithFail(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
mkHead.CheckStatus(state.Running.String())
}

View File

@ -112,10 +112,7 @@ func TestProxy(t *testing.T) {
func testProxyWarning(t *testing.T) {
p := profileName(t) // profile name
mk := NewMinikubeRunner(t, p)
stdout, stderr, err := mk.Start("--wait=false")
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
stdout, stderr := mk.StartWithFail("--wait=false")
msg := "Found network options:"
if !strings.Contains(stdout, msg) {