commit 453b8630f4 ("merge upstream")
@@ -101,6 +101,7 @@ const (
 	downloadOnly     = "download-only"
 	dnsProxy         = "dns-proxy"
 	hostDNSResolver  = "host-dns-resolver"
+	waitUntilHealthy = "wait"
 )

 var (

@@ -140,6 +141,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
 	startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
 	startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"")
+	startCmd.Flags().Bool(waitUntilHealthy, true, "Wait until Kubernetes core services are healthy before exiting")
 }

 // initKubernetesFlags inits the commandline flags for kubernetes related options
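Not part of the diff: a minimal, self-contained sketch of the pattern the two hunks above rely on, namely a boolean flag registered on startCmd's pflag set and later read back with viper.GetBool(waitUntilHealthy). The explicit viper.BindPFlags call is an assumption of this sketch; minikube wires that binding elsewhere.

// Sketch only: how a cobra flag registered with Flags().Bool becomes
// visible to viper.GetBool, which is what runStart checks below.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

const waitUntilHealthy = "wait"

func main() {
	startCmd := &cobra.Command{
		Use: "start",
		Run: func(cmd *cobra.Command, args []string) {
			if viper.GetBool(waitUntilHealthy) {
				fmt.Println("would block until core services are healthy")
				return
			}
			fmt.Println("skipping the health wait")
		},
	}
	startCmd.Flags().Bool(waitUntilHealthy, true, "Wait until Kubernetes core services are healthy before exiting")
	// Assumption: bind the flag set so viper sees the parsed value.
	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
		panic(err)
	}
	startCmd.SetArgs([]string{"--wait=false"})
	if err := startCmd.Execute(); err != nil {
		panic(err)
	}
}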
@@ -255,9 +257,10 @@ func runStart(cmd *cobra.Command, args []string) {
 	}
 	// special ops for none driver, like change minikube directory.
 	prepareNone(viper.GetString(vmDriver))

-	if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
-		exit.WithError("Wait failed", err)
+	if viper.GetBool(waitUntilHealthy) {
+		if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
+			exit.WithError("Wait failed", err)
+		}
 	}
 	showKubectlConnectInfo(kubeconfig)

@@ -344,7 +347,11 @@ func showKubectlConnectInfo(kubeconfig *pkgutil.KubeConfigSetup) {
 	if kubeconfig.KeepContext {
 		console.OutT(console.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", console.Arg{"name": kubeconfig.ClusterName})
 	} else {
-		console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		if !viper.GetBool(waitUntilHealthy) {
+			console.OutT(console.Ready, "kubectl has been configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		} else {
+			console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		}
 	}
 	_, err := exec.LookPath("kubectl")
 	if err != nil {
@@ -2,7 +2,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   labels:
-    kubernetes.io/minikube-addons: registry-proxy
+    kubernetes.io/minikube-addons: registry
     addonmanager.kubernetes.io/mode: Reconcile
   name: registry-proxy
   namespace: kube-system

@@ -10,7 +10,7 @@ spec:
   template:
     metadata:
       labels:
-        kubernetes.io/minikube-addons: registry-proxy
+        kubernetes.io/minikube-addons: registry
        addonmanager.kubernetes.io/mode: Reconcile
     spec:
       containers:

@@ -13,6 +13,7 @@ spec:
   template:
     metadata:
       labels:
+        actual-registry: "true"
         kubernetes.io/minikube-addons: registry
         addonmanager.kubernetes.io/mode: Reconcile
     spec:

@@ -12,4 +12,5 @@ spec:
   - port: 80
     targetPort: 5000
   selector:
+    actual-registry: "true"
     kubernetes.io/minikube-addons: registry
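A short aside on why the Service hunk above adds actual-registry: "true" to the selector: once the proxy DaemonSet is relabelled kubernetes.io/minikube-addons: registry, a selector on that label alone would also match the proxy pods, so the extra label appears to keep the Service pointed at the registry pods only. A hedged sketch with the apimachinery label helpers, using label sets copied from the YAML hunks, illustrates the matching:

// Sketch: label matching after this change; the pod label sets are taken
// from the YAML hunks above, everything else is illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	registryPod := labels.Set{"actual-registry": "true", "kubernetes.io/minikube-addons": "registry"}
	proxyPod := labels.Set{"kubernetes.io/minikube-addons": "registry"}

	// Selector as it reads in the registry Service after this diff.
	sel := labels.SelectorFromSet(labels.Set{
		"actual-registry":               "true",
		"kubernetes.io/minikube-addons": "registry",
	})

	fmt.Println(sel.Matches(registryPod)) // true: the Service still targets the registry pods
	fmt.Println(sel.Matches(proxyPod))    // false: the relabelled proxy pods are not selected
}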
@@ -76,8 +76,8 @@ func readLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error)

 func testDashboard(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	cmd, out := minikubeRunner.RunDaemon("dashboard --url")
+	mk := NewMinikubeRunner(t, "--wait=false")
+	cmd, out := mk.RunDaemon("dashboard --url")
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {

@@ -121,10 +121,10 @@ func testDashboard(t *testing.T) {

 func testIngressController(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	kubectlRunner := util.NewKubectlRunner(t)

-	minikubeRunner.RunCommand("addons enable ingress", true)
+	mk.RunCommand("addons enable ingress", true)
 	if err := util.WaitForIngressControllerRunning(t); err != nil {
 		t.Fatalf("waiting for ingress-controller to be up: %v", err)
 	}

@@ -154,7 +154,7 @@ func testIngressController(t *testing.T) {
 	checkIngress := func() error {
 		expectedStr := "Welcome to nginx!"
 		runCmd := fmt.Sprintf("curl http://127.0.0.1:80 -H 'Host: nginx.example.com'")
-		sshCmdOutput, _ := minikubeRunner.SSH(runCmd)
+		sshCmdOutput, _ := mk.SSH(runCmd)
 		if !strings.Contains(sshCmdOutput, expectedStr) {
 			return fmt.Errorf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
 		}

@@ -172,15 +172,15 @@ func testIngressController(t *testing.T) {
 			}
 		}
 	}()
-	minikubeRunner.RunCommand("addons disable ingress", true)
+	mk.RunCommand("addons disable ingress", true)
 }

 func testServicesList(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t)

 	checkServices := func() error {
-		output := minikubeRunner.RunCommand("service list", false)
+		output := mk.RunCommand("service list", false)
 		if !strings.Contains(output, "kubernetes") {
 			return fmt.Errorf("Error, kubernetes service missing from output %s", output)
 		}

@@ -259,8 +259,8 @@ func testRegistry(t *testing.T) {
 	minikubeRunner.RunCommand("addons disable registry", true)
 }
 func testGvisor(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t, "--wait=false")
+	mk.RunCommand("addons enable gvisor", true)

 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {

@@ -275,7 +275,7 @@ func testGvisor(t *testing.T) {
 	}

 	t.Log("disabling gvisor addon")
-	minikubeRunner.RunCommand("addons disable gvisor", true)
+	mk.RunCommand("addons disable gvisor", true)
 	t.Log("waiting for gvisor controller pod to be deleted")
 	if err := util.WaitForGvisorControllerDeleted(); err != nil {
 		t.Fatalf("waiting for gvisor controller to be deleted: %v", err)

@@ -291,9 +291,9 @@ func testGvisor(t *testing.T) {
 }

 func testGvisorRestart(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t, "--wait=false")
+	mk.EnsureRunning()
+	mk.RunCommand("addons enable gvisor", true)

 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {

@@ -301,10 +301,10 @@ func testGvisorRestart(t *testing.T) {
 	}

 	// TODO: @priyawadhwa to add test for stop as well
-	minikubeRunner.RunCommand("delete", false)
-	minikubeRunner.CheckStatus(state.None.String())
-	minikubeRunner.Start()
-	minikubeRunner.CheckStatus(state.Running.String())
+	mk.RunCommand("delete", false)
+	mk.CheckStatus(state.None.String())
+	mk.Start()
+	mk.CheckStatus(state.Running.String())

 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
@@ -30,8 +30,7 @@ import (
 // Assert that docker-env subcommand outputs usable information for "docker ps"
 func testClusterEnv(t *testing.T) {
 	t.Parallel()
-
-	r := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t, "--wait=false")

 	// Set a specific shell syntax so that we don't have to handle every possible user shell
 	envOut := r.RunCommand("docker-env --shell=bash", true)
@@ -25,9 +25,9 @@ import (

 func testClusterLogs(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	logsCmdOutput := minikubeRunner.GetLogs()
+	mk := NewMinikubeRunner(t)
+	mk.EnsureRunning()
+	logsCmdOutput := mk.GetLogs()

 	// check for # of lines or check for strings
 	logWords := []string{"minikube", ".go"}
@@ -25,9 +25,9 @@ import (

 func testClusterSSH(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	expectedStr := "hello"
-	sshCmdOutput := minikubeRunner.RunCommand("ssh echo "+expectedStr, true)
+	sshCmdOutput := mk.RunCommand("ssh echo "+expectedStr, true)
 	if !strings.Contains(sshCmdOutput, expectedStr) {
 		t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
 	}
@@ -27,8 +27,8 @@ import (
 )

 func TestDocker(t *testing.T) {
-	mk := NewMinikubeRunner(t)
-	if strings.Contains(mk.StartArgs, "--vm-driver=none") {
+	mk := NewMinikubeRunner(t, "--wait=false")
+	if usingNoneDriver(mk) {
 		t.Skip("skipping test as none driver does not bundle docker")
 	}

@@ -40,8 +40,8 @@ func TestDocker(t *testing.T) {
 		t.Logf("pre-delete failed (probably ok): %v", err)
 	}

-	startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.Args,
-		"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr --v=5")
+	startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.GlobalArgs,
+		"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
 	stdout, stderr, err := mk.RunWithContext(ctx, startCmd)
 	if err != nil {
 		t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
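The usingNoneDriver helper that replaces the inline check above is not shown in this excerpt. Judging from the condition it replaces, a plausible definition (an assumption, not taken from the diff) would be:

package integration

import (
	"strings"

	"k8s.io/minikube/test/integration/util"
)

// usingNoneDriver: plausible shape of the helper, inferred from the
// strings.Contains check it replaces above; the real definition is not
// part of this excerpt.
func usingNoneDriver(r util.MinikubeRunner) bool {
	return strings.Contains(r.StartArgs, "--vm-driver=none")
}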
@@ -19,6 +19,7 @@ package integration

 import (
 	"flag"
 	"os"
+	"strings"
 	"testing"

 	"k8s.io/minikube/test/integration/util"

@@ -31,17 +32,17 @@ func TestMain(m *testing.M) {
 }

 var binaryPath = flag.String("binary", "../../out/minikube", "path to minikube binary")
-var args = flag.String("minikube-args", "", "Arguments to pass to minikube")
+var globalArgs = flag.String("minikube-args", "", "Arguments to pass to minikube")
 var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start")
 var mountArgs = flag.String("minikube-mount-args", "", "Arguments to pass to minikube mount")
 var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")

 // NewMinikubeRunner creates a new MinikubeRunner
-func NewMinikubeRunner(t *testing.T) util.MinikubeRunner {
+func NewMinikubeRunner(t *testing.T, extraArgs ...string) util.MinikubeRunner {
 	return util.MinikubeRunner{
-		Args:       *args,
 		BinaryPath: *binaryPath,
-		StartArgs:  *startArgs,
+		StartArgs:  *startArgs + strings.Join(extraArgs, " "),
+		GlobalArgs: *globalArgs,
 		MountArgs:  *mountArgs,
 		T:          t,
 	}
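For orientation, a sketch of how the tests elsewhere in this diff use the new variadic constructor; the helper name is invented for illustration. Note that the diff concatenates *startArgs and the joined extras without a separating space, so it relies on the base value being empty or ending in whitespace.

package integration

import "testing"

// sketchRunnerUsage (hypothetical helper, not part of the diff) shows how a
// test obtains a runner whose start invocations carry --wait=false.
func sketchRunnerUsage(t *testing.T) {
	mk := NewMinikubeRunner(t, "--wait=false")
	// StartArgs is *startArgs with the extra flags appended; GlobalArgs
	// carries the renamed -minikube-args value.
	t.Logf("start args: %q, global args: %q", mk.StartArgs, mk.GlobalArgs)
	mk.EnsureRunning()
	mk.CheckStatus("Running")
}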
@@ -26,10 +26,10 @@ import (

 func TestISO(t *testing.T) {

-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

-	minikubeRunner.RunCommand("delete", false)
-	minikubeRunner.Start()
+	mk.RunCommand("delete", false)
+	mk.Start()

 	t.Run("permissions", testMountPermissions)
 	t.Run("packages", testPackages)

@@ -37,14 +37,14 @@ func TestISO(t *testing.T) {
 }

 func testMountPermissions(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	// test mount permissions
 	mountPoints := []string{"/Users", "/hosthome"}
 	perms := "drwxr-xr-x"
 	foundMount := false

 	for _, dir := range mountPoints {
-		output, err := minikubeRunner.SSH(fmt.Sprintf("ls -l %s", dir))
+		output, err := mk.SSH(fmt.Sprintf("ls -l %s", dir))
 		if err != nil {
 			continue
 		}

@@ -59,7 +59,7 @@ func testMountPermissions(t *testing.T) {
 }

 func testPackages(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	packages := []string{
 		"git",

@@ -73,7 +73,7 @@ func testPackages(t *testing.T) {
 	}

 	for _, pkg := range packages {
-		if output, err := minikubeRunner.SSH(fmt.Sprintf("which %s", pkg)); err != nil {
+		if output, err := mk.SSH(fmt.Sprintf("which %s", pkg)); err != nil {
 			t.Errorf("Error finding package: %s. Error: %v. Output: %s", pkg, err, output)
 		}
 	}

@@ -81,7 +81,7 @@ func testPackages(t *testing.T) {
 }

 func testPersistence(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	for _, dir := range []string{
 		"/data",

@@ -92,7 +92,7 @@ func testPersistence(t *testing.T) {
 		"/var/lib/toolbox",
 		"/var/lib/boot2docker",
 	} {
-		output, err := minikubeRunner.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir))
+		output, err := mk.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir))
 		if err != nil {
 			t.Errorf("Error checking device for %s. Error: %v", dir, err)
 		}
@@ -38,12 +38,12 @@ func testMounting(t *testing.T) {
 	if runtime.GOOS == "darwin" {
 		t.Skip("mount tests disabled in darwin due to timeout (issue#3200)")
 	}
-	if strings.Contains(*args, "--vm-driver=none") {
+	if strings.Contains(*globalArgs, "--vm-driver=none") {
 		t.Skip("skipping test for none driver as it does not need mount")
 	}

 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	tempDir, err := ioutil.TempDir("", "mounttest")
 	if err != nil {

@@ -51,8 +51,8 @@ func testMounting(t *testing.T) {
 	}
 	defer os.RemoveAll(tempDir)

-	mountCmd := getMountCmd(minikubeRunner, tempDir)
-	cmd, _, _ := minikubeRunner.RunDaemon2(mountCmd)
+	mountCmd := getMountCmd(mk, tempDir)
+	cmd, _, _ := mk.RunDaemon2(mountCmd)
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {

@@ -99,7 +99,7 @@ func testMounting(t *testing.T) {
 	t.Logf("Pods appear to be running")

 	mountTest := func() error {
-		if err := verifyFiles(minikubeRunner, kubectlRunner, tempDir, podName, expected); err != nil {
+		if err := verifyFiles(mk, kubectlRunner, tempDir, podName, expected); err != nil {
 			t.Fatalf(err.Error())
 		}

@@ -111,10 +111,10 @@ func testMounting(t *testing.T) {

 }

-func getMountCmd(minikubeRunner util.MinikubeRunner, mountDir string) string {
+func getMountCmd(mk util.MinikubeRunner, mountDir string) string {
 	var mountCmd string
-	if len(minikubeRunner.MountArgs) > 0 {
-		mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", minikubeRunner.MountArgs, mountDir)
+	if len(mk.MountArgs) > 0 {
+		mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", mk.MountArgs, mountDir)
 	} else {
 		mountCmd = fmt.Sprintf("mount %s:/mount-9p", mountDir)
 	}
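A quick sketch of what getMountCmd yields for the two branches above; the mount-args value below is hypothetical and only for illustration:

package integration

import (
	"fmt"

	"k8s.io/minikube/test/integration/util"
)

// Illustrative only: the two shapes of mount command getMountCmd can build.
func sketchMountCmd() {
	mk := util.MinikubeRunner{}
	fmt.Println(getMountCmd(mk, "/tmp/mounttest")) // "mount /tmp/mounttest:/mount-9p"

	mk.MountArgs = "--9p-version=9p2000.u" // hypothetical -minikube-mount-args value
	fmt.Println(getMountCmd(mk, "/tmp/mounttest")) // "mount --9p-version=9p2000.u /tmp/mounttest:/mount-9p"
}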
@@ -144,7 +144,7 @@ func waitForPods(s map[string]string) error {
 	return nil
 }

-func verifyFiles(minikubeRunner util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error {
+func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error {
 	path := filepath.Join(tempDir, "frompod")
 	out, err := ioutil.ReadFile(path)
 	if err != nil {

@@ -167,7 +167,7 @@ func verifyFiles(minikubeRunner util.MinikubeRunner, kubectlRunner *util.Kubectl
 	files := []string{"fromhost", "frompod"}
 	for _, file := range files {
 		statCmd := fmt.Sprintf("stat /mount-9p/%s", file)
-		statOutput, err := minikubeRunner.SSH(statCmd)
+		statOutput, err := mk.SSH(statCmd)
 		if err != nil {
 			return fmt.Errorf("Unable to stat %s via SSH. error %v, %s", file, err, statOutput)
 		}
@@ -21,7 +21,6 @@ package integration
 import (
 	"path"
 	"path/filepath"
-	"strings"
 	"testing"
 	"time"

@@ -30,11 +29,11 @@ import (
 )

 func TestPersistence(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	if strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") {
+	mk := NewMinikubeRunner(t, "--wait=false")
+	if usingNoneDriver(mk) {
 		t.Skip("skipping test as none driver does not support persistence")
 	}
-	minikubeRunner.EnsureRunning()
+	mk.EnsureRunning()

 	kubectlRunner := util.NewKubectlRunner(t)
 	curdir, err := filepath.Abs("")

@@ -59,19 +58,19 @@ func TestPersistence(t *testing.T) {
 	verify(t)

 	// Now restart minikube and make sure the pod is still there.
-	// minikubeRunner.RunCommand("stop", true)
-	// minikubeRunner.CheckStatus("Stopped")
+	// mk.RunCommand("stop", true)
+	// mk.CheckStatus("Stopped")
 	checkStop := func() error {
-		minikubeRunner.RunCommand("stop", true)
-		return minikubeRunner.CheckStatusNoFail(state.Stopped.String())
+		mk.RunCommand("stop", true)
+		return mk.CheckStatusNoFail(state.Stopped.String())
 	}

 	if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
 		t.Fatalf("timed out while checking stopped status: %v", err)
 	}

-	minikubeRunner.Start()
-	minikubeRunner.CheckStatus(state.Running.String())
+	mk.Start()
+	mk.CheckStatus(state.Running.String())

 	// Make sure the same things come up after we've restarted.
 	verify(t)
@@ -71,7 +71,7 @@ func TestProxy(t *testing.T) {
 		t.Fatalf("Failed to set up the test proxy: %s", err)
 	}

-	// making sure there is no running miniukube to avoid https://github.com/kubernetes/minikube/issues/4132
+	// making sure there is no running minikube to avoid https://github.com/kubernetes/minikube/issues/4132
 	r := NewMinikubeRunner(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()

@@ -109,10 +109,10 @@ func TestProxy(t *testing.T) {

 // testProxyWarning checks user is warned correctly about the proxy related env vars
 func testProxyWarning(t *testing.T) {
-	r := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t, "--wait=false")
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()
-	startCmd := fmt.Sprintf("start %s %s %s", r.StartArgs, r.Args, "--alsologtostderr --v=5")
+	startCmd := fmt.Sprintf("start %s %s", r.StartArgs, r.GlobalArgs)
 	stdout, stderr, err := r.RunWithContext(ctx, startCmd)
 	if err != nil {
 		t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)

@@ -131,8 +131,8 @@ func testProxyWarning(t *testing.T) {

 // testProxyDashboard checks if dashboard URL is accessible if proxy is set
 func testProxyDashboard(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	cmd, out := minikubeRunner.RunDaemon("dashboard --url")
+	mk := NewMinikubeRunner(t, "--wait=false")
+	cmd, out := mk.RunDaemon("dashboard --url")
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {
@@ -46,7 +46,7 @@ func testTunnel(t *testing.T) {
 	}

 	t.Log("starting tunnel test...")
-	runner := NewMinikubeRunner(t)
+	runner := NewMinikubeRunner(t, "--wait=false")
 	go func() {
 		output := runner.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
 		if t.Failed() {
@@ -43,7 +43,7 @@ const kubectlBinary = "kubectl"
 type MinikubeRunner struct {
 	T          *testing.T
 	BinaryPath string
-	Args       string
+	GlobalArgs string
 	StartArgs  string
 	MountArgs  string
 	Runtime    string

@@ -208,9 +208,9 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
 	return string(stdout), nil
 }

-// Start starts the container runtime
+// Start starts the cluster
 func (m *MinikubeRunner) Start(opts ...string) {
-	cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " "))
+	cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.GlobalArgs, strings.Join(opts, " "))
 	m.RunCommand(cmd, true)
 }

@@ -234,12 +234,12 @@ func (m *MinikubeRunner) ParseEnvCmdOutput(out string) map[string]string {

 // GetStatus returns the status of a service
 func (m *MinikubeRunner) GetStatus() string {
-	return m.RunCommand(fmt.Sprintf("status --format={{.Host}} %s", m.Args), false)
+	return m.RunCommand(fmt.Sprintf("status --format={{.Host}} %s", m.GlobalArgs), false)
 }

 // GetLogs returns the logs of a service
 func (m *MinikubeRunner) GetLogs() string {
-	return m.RunCommand(fmt.Sprintf("logs %s", m.Args), true)
+	return m.RunCommand(fmt.Sprintf("logs %s", m.GlobalArgs), true)
 }

 // CheckStatus makes sure the service has the desired status, or cause fatal error
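Outside the diff, a small sketch of the command string Start assembles after the Args to GlobalArgs rename; all flag values below are hypothetical stand-ins for the MinikubeRunner fields:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values standing in for MinikubeRunner fields.
	startArgs := "--vm-driver=kvm2 --wait=false" // StartArgs, extras already appended
	globalArgs := "--profile=minitest"           // GlobalArgs (was Args)
	opts := []string{"--memory=4096"}            // per-call options passed to Start

	cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2",
		startArgs, globalArgs, strings.Join(opts, " "))
	fmt.Println(cmd)
	// start --vm-driver=kvm2 --wait=false --profile=minitest --memory=4096 --alsologtostderr --v=2
}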
@@ -255,6 +255,7 @@
     "failed to open browser: {{.error}}": "",
     "kube-system": "",
     "kubectl and minikube configuration will be stored in {{.home_folder}}": "",
+    "kubectl has been configured to use {{.name}}": "",
     "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
     "kubectl proxy": "",
     "logdir set failed": "",

@@ -255,6 +255,7 @@
     "failed to open browser: {{.error}}": "",
     "kube-system": "",
     "kubectl and minikube configuration will be stored in {{.home_folder}}": "",
+    "kubectl has been configured to use {{.name}}": "",
     "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
     "kubectl proxy": "",
     "logdir set failed": "",