Remove lint issues from integration tests, mostly by adding error handlers.
pull/3184/head
Thomas Stromberg 2018-09-28 11:57:48 -07:00
parent 92a435cf4b
commit af61bf790c
8 changed files with 42 additions and 16 deletions

View File

@@ -122,8 +122,13 @@ func testIngressController(t *testing.T) {
t.Fatalf(err.Error())
}
defer kubectlRunner.RunCommand([]string{"delete", "-f", podPath})
defer kubectlRunner.RunCommand([]string{"delete", "-f", ingressPath})
defer func() {
for _, p := range []string{podPath, ingressPath} {
if out, err := kubectlRunner.RunCommand([]string{"delete", "-f", p}); err != nil {
t.Logf("delete -f %s failed: %v\noutput: %s\n", p, err, out)
}
}
}()
minikubeRunner.RunCommand("addons disable ingress", true)
}

View File

@@ -49,15 +49,21 @@ func testClusterDNS(t *testing.T) {
}
listOpts := metav1.ListOptions{LabelSelector: "integration-test=busybox"}
pods, err := client.CoreV1().Pods("default").List(listOpts)
if err != nil {
t.Fatalf("Unable to list default pods: %v", err)
}
if len(pods.Items) == 0 {
t.Fatal("Expected a busybox pod to be running")
}
podName := pods.Items[0].Name
defer kubectlRunner.RunCommand([]string{"delete", "po", podName})
bbox := pods.Items[0].Name
defer func() {
if out, err := kubectlRunner.RunCommand([]string{"delete", "po", bbox}); err != nil {
t.Logf("delete po %s failed: %v\noutput: %s\n", bbox, err, out)
}
}()
dnsByteArr, err := kubectlRunner.RunCommand([]string{"exec", podName,
"nslookup", "kubernetes"})
dnsByteArr, err := kubectlRunner.RunCommand([]string{"exec", bbox, "nslookup", "kubernetes"})
if err != nil {
t.Fatalf("running nslookup in pod:%s", err)
}

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
api "k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/minikube/test/integration/util"
)

View File

@@ -32,7 +32,8 @@ func TestDocker(t *testing.T) {
}
minikubeRunner.RunCommand("delete", false)
startCmd := fmt.Sprintf("start %s %s %s", minikubeRunner.StartArgs, minikubeRunner.Args, "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
startCmd := fmt.Sprintf("start %s %s %s", minikubeRunner.StartArgs, minikubeRunner.Args,
"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
minikubeRunner.RunCommand(startCmd, true)
minikubeRunner.EnsureRunning()

View File

@@ -32,7 +32,7 @@ func TestMain(m *testing.M) {
var binaryPath = flag.String("binary", "../../out/minikube", "path to minikube binary")
var args = flag.String("minikube-args", "", "Arguments to pass to minikube")
var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start")
var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")
var testdataDir = flag.String("testdata-dir", "testdata", "source of testdata relative to test/integration")
func NewMinikubeRunner(t *testing.T) util.MinikubeRunner {
return util.MinikubeRunner{

View File

@@ -41,6 +41,6 @@ func TestFunctional(t *testing.T) {
t.Run("EnvVars", testClusterEnv)
t.Run("SSH", testClusterSSH)
t.Run("IngressController", testIngressController)
// t.Run("Mounting", testMounting)
t.Run("Mounting", testMounting)
}
}

View File

@@ -47,7 +47,12 @@ func testMounting(t *testing.T) {
mountCmd := fmt.Sprintf("mount %s:/mount-9p", tempDir)
cmd := minikubeRunner.RunDaemon(mountCmd)
defer cmd.Process.Kill()
defer func() {
err := cmd.Process.Kill()
if err != nil {
t.Logf("Failed to kill mount command: %v", err)
}
}()
kubectlRunner := util.NewKubectlRunner(t)
podName := "busybox-mount"
@@ -71,7 +76,11 @@
}
return nil
}
defer kubectlRunner.RunCommand([]string{"delete", "-f", podPath})
defer func() {
if out, err := kubectlRunner.RunCommand([]string{"delete", "-f", podPath}); err != nil {
t.Logf("delete -f %s failed: %v\noutput: %s\n", podPath, err, out)
}
}()
if err := util.Retry(t, setupTest, 5*time.Second, 40); err != nil {
t.Fatal("mountTest failed with error:", err)

View File

@@ -43,7 +43,9 @@ func testProvisioning(t *testing.T) {
kubectlRunner := util.NewKubectlRunner(t)
defer func() {
kubectlRunner.RunCommand([]string{"delete", "pvc", pvcName})
if out, err := kubectlRunner.RunCommand([]string{"delete", "pvc", pvcName}); err != nil {
t.Logf("delete pvc %s failed: %v\noutput: %s\n", pvcName, err, out)
}
}()
// We have to make sure the addon-manager has created the StorageClass before creating
@@ -51,15 +53,18 @@ func testProvisioning(t *testing.T) {
checkStorageClass := func() error {
scl := storage.StorageClassList{}
kubectlRunner.RunCommandParseOutput([]string{"get", "storageclass"}, &scl)
if err := kubectlRunner.RunCommandParseOutput([]string{"get", "storageclass"}, &scl); err != nil {
return fmt.Errorf("get storageclass: %v", err)
}
if len(scl.Items) > 0 {
return nil
}
return fmt.Errorf("No default StorageClass yet.")
return fmt.Errorf("no StorageClass yet")
}
if err := util.Retry(t, checkStorageClass, 5*time.Second, 20); err != nil {
t.Fatalf("No default storage class: %s", err)
t.Fatalf("no default storage class after retry: %s", err)
}
// Check that the storage provisioner pod is running