Merge pull request #1938 from r2d4/integration-test-cleanup-
Integration test cleanup (pull/1941/head)
commit 3180e2e897
@@ -152,15 +152,15 @@ test-iso:
 
 .PHONY: integration
 integration: out/minikube
-	go test -v -test.timeout=30m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" --minikube-args="$(MINIKUBE_ARGS)"
+	go test -v -test.timeout=30m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS)" $(TEST_ARGS)
 
 .PHONY: integration-none-driver
 integration-none-driver: e2e-linux-amd64 out/minikube-linux-amd64
-	sudo -E out/e2e-linux-amd64 -testdata-dir "test/integration/testdata" -minikube-args="--vm-driver=none --alsologtostderr" -test.v -test.timeout=30m -binary=out/minikube-linux-amd64
+	sudo -E out/e2e-linux-amd64 -testdata-dir "test/integration/testdata" -minikube-start-args="--vm-driver=none" -test.v -test.timeout=30m -binary=out/minikube-linux-amd64 $(TEST_ARGS)
 
 .PHONY: integration-versioned
 integration-versioned: out/minikube
-	go test -v -test.timeout=30m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS) versioned" --minikube-args="$(MINIKUBE_ARGS)"
+	go test -v -test.timeout=30m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS) versioned" $(TEST_ARGS)
 
 .PHONY: test
 test: pkg/minikube/assets/assets.go
@@ -83,7 +83,7 @@ find ~/.minikube || true
 
 # Allow this to fail, we'll switch on the return code below.
 set +e
-${SUDO_PREFIX}out/e2e-${OS_ARCH} -minikube-args="--vm-driver=${VM_DRIVER} --v=10 --logtostderr" -test.v -test.timeout=30m -binary=out/minikube-${OS_ARCH}
+${SUDO_PREFIX}out/e2e-${OS_ARCH} -minikube-start-args="--vm-driver=${VM_DRIVER}" -minikube-args="--v=10 --logtostderr ${EXTRA_ARGS}" -test.v -test.timeout=30m -binary=out/minikube-${OS_ARCH}
 result=$?
 set -e
@@ -18,7 +18,6 @@ package util
 
 import (
 	"fmt"
-	"testing"
 	"time"
 
 	"github.com/pkg/errors"
@@ -30,11 +29,13 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/pkg/api/v1"
 
+	"github.com/golang/glog"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 )
@@ -109,31 +110,36 @@ func StartPods(c kubernetes.Interface, namespace string, pod v1.Pod, waitForRunn
 // Wait up to 10 minutes for all matching pods to become Running and at least one
 // matching pod exists.
 func WaitForPodsWithLabelRunning(c kubernetes.Interface, ns string, label labels.Selector) error {
-	running := false
-	PodStore := NewPodStore(c, ns, label, fields.Everything())
-	defer PodStore.Stop()
-waitLoop:
-	for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(250 * time.Millisecond) {
-		pods := PodStore.List()
-		if len(pods) == 0 {
-			continue waitLoop
+	lastKnownPodNumber := -1
+	return wait.PollImmediate(constants.APICallRetryInterval, time.Minute*10, func() (bool, error) {
+		listOpts := metav1.ListOptions{LabelSelector: label.String()}
+		pods, err := c.CoreV1().Pods(ns).List(listOpts)
+		if err != nil {
+			glog.Infof("error getting Pods with label selector %q [%v]\n", label.String(), err)
+			return false, nil
 		}
-		for _, p := range pods {
-			if p.Status.Phase != v1.PodRunning {
-				continue waitLoop
+
+		if lastKnownPodNumber != len(pods.Items) {
+			glog.Infof("Found %d Pods for label selector %s\n", len(pods.Items), label.String())
+			lastKnownPodNumber = len(pods.Items)
+		}
+
+		if len(pods.Items) == 0 {
+			return false, nil
+		}
+
+		for _, pod := range pods.Items {
+			if pod.Status.Phase != v1.PodRunning {
+				return false, nil
 			}
 		}
-		running = true
-		break
-	}
-	if !running {
-		return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
-	}
-	return nil
+
+		return true, nil
+	})
 }
 
 // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
-func WaitForRCToStabilize(t *testing.T, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
+func WaitForRCToStabilize(c kubernetes.Interface, ns, name string, timeout time.Duration) error {
 	options := metav1.ListOptions{FieldSelector: fields.Set{
 		"metadata.name":      name,
 		"metadata.namespace": ns,
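The rewrite above replaces the hand-rolled retry loop with wait.PollImmediate from k8s.io/apimachinery. For readers unfamiliar with that helper, here is a minimal, self-contained sketch of its contract; the interval and timeout values are illustrative and not taken from this change:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        start := time.Now()
        // Poll every 250ms for up to 5s; the condition also runs once immediately.
        err := wait.PollImmediate(250*time.Millisecond, 5*time.Second, func() (bool, error) {
            // (false, nil) means "not done yet, keep polling";
            // (true, nil) stops with success; a non-nil error aborts early.
            return time.Since(start) > time.Second, nil
        })
        fmt.Println("poll finished, err =", err)
    }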
@@ -154,7 +160,7 @@ func WaitForRCToStabilize(t *testing.T, c kubernetes.Interface, ns, name string,
 			*(rc.Spec.Replicas) == rc.Status.Replicas {
 			return true, nil
 		}
-		t.Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
+		glog.Infof("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
 			name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
 	}
 	return false, nil
@@ -163,21 +169,21 @@ func WaitForRCToStabilize(t *testing.T, c kubernetes.Interface, ns, name string,
 }
 
 // WaitForService waits until the service appears (exist == true), or disappears (exist == false)
-func WaitForService(t *testing.T, c kubernetes.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
+func WaitForService(c kubernetes.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
 	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
 		_, err := c.Core().Services(namespace).Get(name, metav1.GetOptions{})
 		switch {
 		case err == nil:
-			t.Logf("Service %s in namespace %s found.", name, namespace)
+			glog.Infof("Service %s in namespace %s found.", name, namespace)
 			return exist, nil
 		case apierrs.IsNotFound(err):
-			t.Logf("Service %s in namespace %s disappeared.", name, namespace)
+			glog.Infof("Service %s in namespace %s disappeared.", name, namespace)
 			return !exist, nil
 		case !IsRetryableAPIError(err):
-			t.Logf("Non-retryable failure while getting service.")
+			glog.Infof("Non-retryable failure while getting service.")
 			return false, err
 		default:
-			t.Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
+			glog.Infof("Get service %s in namespace %s failed: %v", name, namespace, err)
 			return false, nil
 		}
 	})
@@ -189,9 +195,9 @@ func WaitForService(t *testing.T, c kubernetes.Interface, namespace, name string
 }
 
 //WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
-func WaitForServiceEndpointsNum(t *testing.T, c kubernetes.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
+func WaitForServiceEndpointsNum(c kubernetes.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
 	return wait.Poll(interval, timeout, func() (bool, error) {
-		t.Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
+		glog.Infof("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
 		list, err := c.Core().Endpoints(namespace).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
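Because these helpers no longer take a *testing.T, they can be driven from plain library code. A hypothetical caller, assuming the pkgutil alias for k8s.io/minikube/pkg/util used elsewhere in this change; the service name and durations mirror values that appear later in the diff:

    package example

    import (
        "time"

        pkgutil "k8s.io/minikube/pkg/util"
    )

    // waitForDNS is an illustrative wrapper, not code from this PR.
    func waitForDNS() error {
        client, err := pkgutil.GetClient()
        if err != nil {
            return err
        }
        // Wait for the kube-dns service to exist, polling every 500ms for up to 10 minutes.
        return pkgutil.WaitForService(client, "kube-system", "kube-dns", true, 500*time.Millisecond, 10*time.Minute)
    }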
@@ -27,28 +27,25 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/labels"
+	pkgutil "k8s.io/minikube/pkg/util"
 	"k8s.io/minikube/test/integration/util"
 )
 
 func testAddons(t *testing.T) {
 	t.Parallel()
-	client, err := util.GetClient()
+	client, err := pkgutil.GetClient()
 	if err != nil {
 		t.Fatalf("Could not get kubernetes client: %s", err)
 	}
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-addon-manager"}))
-	if err := util.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
+	if err := pkgutil.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
 		t.Errorf("Error waiting for addon manager to be up")
 	}
 }
 
 func testDashboard(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := util.MinikubeRunner{
-		BinaryPath: *binaryPath,
-		Args:       *args,
-		T:          t,
-	}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	if err := util.WaitForDashboardRunning(t); err != nil {
 		t.Fatalf("waiting for dashboard to be up: %s", err)
@@ -73,10 +70,7 @@ func testDashboard(t *testing.T) {
 
 func testServicesList(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := util.MinikubeRunner{
-		BinaryPath: *binaryPath,
-		Args:       *args,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	checkServices := func() error {
 		output := minikubeRunner.RunCommand("service list", false)
@@ -23,6 +23,9 @@ import (
 	"strings"
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	pkgutil "k8s.io/minikube/pkg/util"
+
 	"k8s.io/minikube/test/integration/util"
 )
@@ -33,9 +36,12 @@ func testClusterDNS(t *testing.T) {
 	}
 
 	kubectlRunner := util.NewKubectlRunner(t)
-	podName := "busybox"
 	podPath := filepath.Join(*testdataDir, "busybox.yaml")
 	defer kubectlRunner.RunCommand([]string{"delete", "-f", podPath})
+	client, err := pkgutil.GetClient()
+	if err != nil {
+		t.Fatalf("Error getting kubernetes client %s", err)
+	}
 
 	if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath}); err != nil {
 		t.Fatalf("creating busybox pod: %s", err)
@@ -44,6 +50,14 @@ func testClusterDNS(t *testing.T) {
 	if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
 		t.Fatalf("Waiting for busybox pod to be up: %s", err)
 	}
+	listOpts := metav1.ListOptions{LabelSelector: "integration-test=busybox"}
+	pods, err := client.CoreV1().Pods("default").List(listOpts)
+	if len(pods.Items) == 0 {
+		t.Fatal("Expected a busybox pod to be running")
+	}
+
+	podName := pods.Items[0].Name
+	defer kubectlRunner.RunCommand([]string{"delete", "po", podName})
 
 	dnsByteArr, err := kubectlRunner.RunCommand([]string{"exec", podName,
 		"nslookup", "kubernetes"})
@@ -29,10 +29,7 @@ import (
 func testClusterEnv(t *testing.T) {
 	t.Parallel()
 
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	dockerEnvVars := minikubeRunner.RunCommand("docker-env", true)
 	if err := minikubeRunner.SetEnvFromEnvCmdOutput(dockerEnvVars); err != nil {
@@ -21,19 +21,14 @@ package integration
 import (
 	"strings"
 	"testing"
-
-	"k8s.io/minikube/test/integration/util"
 )
 
 func testClusterLogs(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 	minikubeRunner.EnsureRunning()
-	logsCmdOutput := minikubeRunner.RunCommand("logs", true)
+	logsCmdOutput := minikubeRunner.GetLogs()
 	//check for # of lines or check for strings
 	logWords := []string{"minikube", ".go"}
 	for _, logWord := range logWords {
@@ -21,17 +21,11 @@ package integration
 import (
 	"strings"
 	"testing"
-
-	"k8s.io/minikube/test/integration/util"
 )
 
 func testClusterSSH(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
-
+	minikubeRunner := NewMinikubeRunner(t)
 	expectedStr := "hello"
 	sshCmdOutput := minikubeRunner.RunCommand("ssh echo "+expectedStr, true)
 	if !strings.Contains(sshCmdOutput, expectedStr) {
@@ -22,22 +22,17 @@ import (
 	"fmt"
 	"strings"
 	"testing"
-
-	"k8s.io/minikube/test/integration/util"
 )
 
 func TestDocker(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
-	if strings.Contains(*args, "--vm-driver=none") {
+	if strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") {
 		t.Skip("skipping test as none driver does not bundle docker")
 	}
 
 	minikubeRunner.RunCommand("delete", false)
-	startCmd := fmt.Sprintf("start %s %s", minikubeRunner.Args, "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
+	startCmd := fmt.Sprintf("start %s %s %s", minikubeRunner.StartArgs, minikubeRunner.Args, "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
 	minikubeRunner.RunCommand(startCmd, true)
 	minikubeRunner.EnsureRunning()
@@ -20,6 +20,8 @@ import (
 	"flag"
 	"os"
 	"testing"
+
+	"k8s.io/minikube/test/integration/util"
 )
 
 func TestMain(m *testing.M) {
@@ -29,4 +31,14 @@ func TestMain(m *testing.M) {
 
 var binaryPath = flag.String("binary", "../../out/minikube", "path to minikube binary")
 var args = flag.String("minikube-args", "", "Arguments to pass to minikube")
+var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start")
 var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")
+
+func NewMinikubeRunner(t *testing.T) util.MinikubeRunner {
+	return util.MinikubeRunner{
+		Args:       *args,
+		BinaryPath: *binaryPath,
+		StartArgs:  *startArgs,
+		T:          t,
+	}
+}
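The NewMinikubeRunner helper added above centralizes the flag plumbing for every integration test. A hypothetical caller inside the same integration package (the test name and the "Running" check are illustrative and assume the package's existing strings and testing imports):

    func TestExampleStatus(t *testing.T) {
        runner := NewMinikubeRunner(t) // picks up -binary, -minikube-args and -minikube-start-args
        runner.EnsureRunning()         // starts the cluster if it is not already up
        if status := runner.GetStatus(); !strings.Contains(status, "Running") {
            t.Errorf("unexpected minikube status: %q", status)
        }
    }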
@@ -24,14 +24,10 @@ import (
 
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/machine"
-	"k8s.io/minikube/test/integration/util"
 )
 
 func TestFunctional(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		BinaryPath: *binaryPath,
-		Args:       *args,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 	minikubeRunner.EnsureRunning()
 	integrationTestImages := []string{"busybox:glibc"}
 	if err := machine.CacheImages(integrationTestImages, constants.ImageCacheDir); err != nil {
@@ -51,7 +47,7 @@ func TestFunctional(t *testing.T) {
 	t.Run("ServicesList", testServicesList)
 	t.Run("Provisioning", testProvisioning)
 
-	if !strings.Contains(*args, "--vm-driver=none") {
+	if !strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") {
 		t.Run("EnvVars", testClusterEnv)
 		t.Run("SSH", testClusterSSH)
 		// t.Run("Mounting", testMounting)
@@ -22,16 +22,11 @@ import (
 	"fmt"
 	"strings"
 	"testing"
-
-	"k8s.io/minikube/test/integration/util"
 )
 
 func TestISO(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	minikubeRunner.RunCommand("delete", true)
 	minikubeRunner.Start()
@@ -42,10 +37,7 @@ func TestISO(t *testing.T) {
 }
 
 func testMountPermissions(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 	// test mount permissions
 	mountPoints := []string{"/Users", "/hosthome"}
 	perms := "drwxr-xr-x"
@@ -67,10 +59,7 @@ func testMountPermissions(t *testing.T) {
 }
 
 func testPackages(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	packages := []string{
 		"git",
@@ -92,10 +81,7 @@ func testPackages(t *testing.T) {
 }
 
 func testPersistence(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	for _, dir := range []string{
 		"/data",
@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/labels"
+	pkgutil "k8s.io/minikube/pkg/util"
 	"k8s.io/minikube/test/integration/util"
 )
@@ -36,10 +37,7 @@ func testMounting(t *testing.T) {
 	if strings.Contains(*args, "--vm-driver=none") {
 		t.Skip("skipping test for none driver as it does not need mount")
 	}
-	minikubeRunner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	minikubeRunner := NewMinikubeRunner(t)
 
 	tempDir, err := ioutil.TempDir("", "mounttest")
 	if err != nil {
@@ -79,12 +77,12 @@ func testMounting(t *testing.T) {
 		t.Fatal("mountTest failed with error:", err)
 	}
 
-	client, err := util.GetClient()
+	client, err := pkgutil.GetClient()
 	if err != nil {
 		t.Fatalf("getting kubernetes client: %s", err)
 	}
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"integration-test": "busybox-mount"}))
-	if err := util.WaitForPodsWithLabelRunning(client, "default", selector); err != nil {
+	if err := pkgutil.WaitForPodsWithLabelRunning(client, "default", selector); err != nil {
 		t.Fatalf("Error waiting for busybox mount pod to be up: %s", err)
 	}
@@ -28,17 +28,14 @@ import (
 )
 
 func TestPersistence(t *testing.T) {
-	minikubeRunner := util.MinikubeRunner{BinaryPath: *binaryPath, T: t}
+	minikubeRunner := NewMinikubeRunner(t)
 	minikubeRunner.EnsureRunning()
 
 	kubectlRunner := util.NewKubectlRunner(t)
 	podPath, _ := filepath.Abs("testdata/busybox.yaml")
 
-	podNamespace := kubectlRunner.CreateRandomNamespace()
-	defer kubectlRunner.DeleteNamespace(podNamespace)
-
 	// Create a pod and wait for it to be running.
-	if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath, "--namespace=" + podNamespace}); err != nil {
+	if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath}); err != nil {
 		t.Fatalf("Error creating test pod: %s", err)
 	}
@@ -47,7 +44,7 @@ func TestPersistence(t *testing.T) {
 		t.Fatalf("waiting for dashboard to be up: %s", err)
 	}
 
-	if err := util.WaitForBusyboxRunning(t, podNamespace); err != nil {
+	if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
 		t.Fatalf("waiting for busybox to be up: %s", err)
 	}
@@ -30,10 +30,7 @@ import (
 
 func TestStartStop(t *testing.T) {
-	runner := util.MinikubeRunner{
-		Args:       *args,
-		BinaryPath: *binaryPath,
-		T:          t}
+	runner := NewMinikubeRunner(t)
 	runner.RunCommand("delete", false)
 	runner.CheckStatus(state.None.String())
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: busybox
+  generateName: busybox-
   labels:
     integration-test: busybox
 spec:
@@ -42,6 +42,7 @@ type MinikubeRunner struct {
 	T          *testing.T
 	BinaryPath string
 	Args       string
+	StartArgs  string
 }
 
 func (m *MinikubeRunner) Run(cmd string) error {
@@ -103,7 +104,7 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
 }
 
 func (m *MinikubeRunner) Start() {
-	m.RunCommand(fmt.Sprintf("start %s", m.Args), true)
+	m.RunCommand(fmt.Sprintf("start %s %s", m.StartArgs, m.Args), true)
 }
 
 func (m *MinikubeRunner) EnsureRunning() {
@@ -129,7 +130,11 @@ func (m *MinikubeRunner) SetEnvFromEnvCmdOutput(dockerEnvVars string) error {
 }
 
 func (m *MinikubeRunner) GetStatus() string {
-	return m.RunCommand("status --format={{.MinikubeStatus}}", true)
+	return m.RunCommand(fmt.Sprintf("status --format={{.MinikubeStatus}} %s", m.Args), true)
+}
+
+func (m *MinikubeRunner) GetLogs() string {
+	return m.RunCommand(fmt.Sprintf("logs %s", m.Args), true)
 }
 
 func (m *MinikubeRunner) CheckStatus(desired string) {
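With the StartArgs field added, the two argument sets compose differently: StartArgs is passed only to minikube start, while Args is appended to the start, status, and logs commands as well. A sketch of a runner wired up by hand; the driver and verbosity flags are placeholder values, not defaults introduced by this change, and real tests should go through NewMinikubeRunner instead:

    package integration

    import (
        "testing"

        "k8s.io/minikube/test/integration/util"
    )

    // exampleRunner is illustrative only.
    func exampleRunner(t *testing.T) util.MinikubeRunner {
        return util.MinikubeRunner{
            T:          t,
            BinaryPath: "out/minikube",
            StartArgs:  "--vm-driver=kvm",      // Start() runs: minikube start --vm-driver=kvm --v=10 --logtostderr
            Args:       "--v=10 --logtostderr", // also appended by GetStatus() and GetLogs()
        }
    }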
@@ -212,26 +217,26 @@ func (k *KubectlRunner) DeleteNamespace(namespace string) error {
 }
 
 func WaitForBusyboxRunning(t *testing.T, namespace string) error {
-	client, err := GetClient()
+	client, err := commonutil.GetClient()
 	if err != nil {
 		return errors.Wrap(err, "getting kubernetes client")
 	}
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"integration-test": "busybox"}))
-	return WaitForPodsWithLabelRunning(client, namespace, selector)
+	return commonutil.WaitForPodsWithLabelRunning(client, namespace, selector)
 }
 
 func WaitForDNSRunning(t *testing.T) error {
-	client, err := GetClient()
+	client, err := commonutil.GetClient()
 	if err != nil {
 		return errors.Wrap(err, "getting kubernetes client")
 	}
 
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
-	if err := WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
+	if err := commonutil.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
 		return errors.Wrap(err, "waiting for kube-dns pods")
 	}
 
-	if err := WaitForService(t, client, "kube-system", "kube-dns", true, time.Millisecond*500, time.Minute*10); err != nil {
+	if err := commonutil.WaitForService(client, "kube-system", "kube-dns", true, time.Millisecond*500, time.Minute*10); err != nil {
 		t.Errorf("Error waiting for kube-dns service to be up")
 	}
@@ -239,19 +244,19 @@ func WaitForDNSRunning(t *testing.T) error {
 }
 
 func WaitForDashboardRunning(t *testing.T) error {
-	client, err := GetClient()
+	client, err := commonutil.GetClient()
 	if err != nil {
 		return errors.Wrap(err, "getting kubernetes client")
 	}
-	if err := WaitForRCToStabilize(t, client, "kube-system", "kubernetes-dashboard", time.Minute*10); err != nil {
+	if err := commonutil.WaitForRCToStabilize(client, "kube-system", "kubernetes-dashboard", time.Minute*10); err != nil {
 		return errors.Wrap(err, "waiting for dashboard RC to stabilize")
 	}
 
-	if err := WaitForService(t, client, "kube-system", "kubernetes-dashboard", true, time.Millisecond*500, time.Minute*10); err != nil {
+	if err := commonutil.WaitForService(client, "kube-system", "kubernetes-dashboard", true, time.Millisecond*500, time.Minute*10); err != nil {
 		return errors.Wrap(err, "waiting for dashboard service to be up")
 	}
 
-	if err := WaitForServiceEndpointsNum(t, client, "kube-system", "kubernetes-dashboard", 1, time.Second*3, time.Minute*10); err != nil {
+	if err := commonutil.WaitForServiceEndpointsNum(client, "kube-system", "kubernetes-dashboard", 1, time.Second*3, time.Minute*10); err != nil {
 		return errors.Wrap(err, "waiting for one dashboard endpoint to be up")
 	}
@@ -36,10 +36,7 @@ func TestVersionedFunctional(t *testing.T) {
 	var minikubeRunner util.MinikubeRunner
 	for _, version := range k8sVersions {
 		vArgs := fmt.Sprintf("%s --kubernetes-version %s", *args, version.Version)
-		minikubeRunner = util.MinikubeRunner{
-			BinaryPath: *binaryPath,
-			Args:       vArgs,
-			T:          t}
+		minikubeRunner = NewMinikubeRunner(t)
 		minikubeRunner.EnsureRunning()
 
 		t.Run("Status", testClusterStatus)