Added default profile

pull/4946/head
Medya Gh 2019-07-23 19:05:57 -07:00
parent 43d1c66705
commit d1a41e0015
22 changed files with 116 additions and 86 deletions

View File

@ -70,10 +70,15 @@ func (s *PodStore) Stop() {
close(s.stopCh)
}
// GetClient gets the client from config
func GetClient() (kubernetes.Interface, error) {
// GetClient gets the client from config
func GetClient(kubectlContext ...string) (kubernetes.Interface, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
if kubectlContext != nil {
configOverrides = &clientcmd.ConfigOverrides{
CurrentContext: kubectlContext[0],
}
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := kubeConfig.ClientConfig()
if err != nil {

View File

@ -121,15 +121,16 @@ func testDashboard(t *testing.T) {
func testIngressController(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
kr := util.NewKubectlRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
kr := util.NewKubectlRunner(t, p)
mk.RunCommand("addons enable ingress", true)
if err := util.WaitForIngressControllerRunning(t); err != nil {
if err := util.WaitForIngressControllerRunning(t, p); err != nil {
t.Fatalf("waiting for ingress-controller to be up: %v", err)
}
if err := util.WaitForIngressDefaultBackendRunning(t); err != nil {
if err := util.WaitForIngressDefaultBackendRunning(t, p); err != nil {
t.Fatalf("waiting for default-http-backend to be up: %v", err)
}
@ -147,7 +148,7 @@ func testIngressController(t *testing.T) {
t.Fatalf("creating nginx ingress resource: %v", err)
}
if err := util.WaitForNginxRunning(t); err != nil {
if err := util.WaitForNginxRunning(t, p); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
}
@ -177,7 +178,8 @@ func testIngressController(t *testing.T) {
func testServicesList(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
checkServices := func() error {
output := mk.RunCommand("service list", false)
@ -192,9 +194,10 @@ func testServicesList(t *testing.T) {
}
func testRegistry(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
mk.RunCommand("addons enable registry", true)
client, err := pkgutil.GetClient()
client, err := pkgutil.GetClient(p)
if err != nil {
t.Fatalf("getting kubernetes client: %v", err)
}
@ -239,7 +242,8 @@ func testRegistry(t *testing.T) {
}
t.Log("checking registry access from inside cluster")
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
// TODO: Fix this
out, _ := kr.RunCommand([]string{
"run",
"registry-test",
@ -264,44 +268,46 @@ func testRegistry(t *testing.T) {
mk.RunCommand("addons disable registry", true)
}
func testGvisor(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("addons enable gvisor", true)
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
if err := util.WaitForGvisorControllerRunning(t, p); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
}
createUntrustedWorkload(t)
createUntrustedWorkload(t, p)
t.Log("making sure untrusted workload is Running")
if err := util.WaitForUntrustedNginxRunning(); err != nil {
if err := util.WaitForUntrustedNginxRunning(p); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
}
t.Log("disabling gvisor addon")
mk.RunCommand("addons disable gvisor", true)
t.Log("waiting for gvisor controller pod to be deleted")
if err := util.WaitForGvisorControllerDeleted(); err != nil {
if err := util.WaitForGvisorControllerDeleted(p); err != nil {
t.Fatalf("waiting for gvisor controller to be deleted: %v", err)
}
createUntrustedWorkload(t)
createUntrustedWorkload(t, p)
t.Log("waiting for FailedCreatePodSandBox event")
if err := util.WaitForFailedCreatePodSandBoxEvent(); err != nil {
if err := util.WaitForFailedCreatePodSandBoxEvent(p); err != nil {
t.Fatalf("waiting for FailedCreatePodSandBox event: %v", err)
}
deleteUntrustedWorkload(t)
deleteUntrustedWorkload(t, p)
}
func testGvisorRestart(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.EnsureRunning()
mk.RunCommand("addons enable gvisor", true)
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
if err := util.WaitForGvisorControllerRunning(t, p); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
}
@ -312,20 +318,20 @@ func testGvisorRestart(t *testing.T) {
mk.CheckStatus(state.Running.String())
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
if err := util.WaitForGvisorControllerRunning(t, p); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
}
createUntrustedWorkload(t)
createUntrustedWorkload(t, p)
t.Log("making sure untrusted workload is Running")
if err := util.WaitForUntrustedNginxRunning(); err != nil {
if err := util.WaitForUntrustedNginxRunning(p); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
}
deleteUntrustedWorkload(t)
deleteUntrustedWorkload(t, p)
}
func createUntrustedWorkload(t *testing.T) {
kr := util.NewKubectlRunner(t)
func createUntrustedWorkload(t *testing.T, profile string) {
kr := util.NewKubectlRunner(t, profile)
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
@ -337,8 +343,8 @@ func createUntrustedWorkload(t *testing.T) {
}
}
func deleteUntrustedWorkload(t *testing.T) {
kr := util.NewKubectlRunner(t)
func deleteUntrustedWorkload(t *testing.T, profile string) {
kr := util.NewKubectlRunner(t, profile)
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)

View File

@ -32,12 +32,13 @@ import (
func testClusterDNS(t *testing.T) {
t.Parallel()
client, err := pkgutil.GetClient()
p := "minikube"
client, err := pkgutil.GetClient(p)
if err != nil {
t.Fatalf("Error getting kubernetes client %v", err)
}
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
busybox := busyBoxPod(t, client, kr)
defer func() {
if _, err := kr.RunCommand([]string{"delete", "po", busybox}); err != nil {
@ -66,7 +67,7 @@ func busyBoxPod(t *testing.T, c kubernetes.Interface, kr *util.KubectlRunner) st
t.Fatalf("creating busybox pod: %s", err)
}
// TODO(tstromberg): Refactor WaitForBusyboxRunning to return name of pod.
if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
if err := util.WaitForBusyboxRunning(t, "default", "minikube"); err != nil {
t.Fatalf("Waiting for busybox pod to be up: %v", err)
}

View File

@ -30,7 +30,8 @@ import (
// Assert that docker-env subcommand outputs usable information for "docker ps"
func testClusterEnv(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
// Set a specific shell syntax so that we don't have to handle every possible user shell
envOut := mk.RunCommand("docker-env --shell=bash", true)

View File

@ -25,7 +25,8 @@ import (
func testClusterLogs(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
mk.EnsureRunning()
logsCmdOutput := mk.GetLogs()

View File

@ -25,7 +25,8 @@ import (
func testClusterSSH(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
expectedStr := "hello"
sshCmdOutput := mk.RunCommand("ssh echo "+expectedStr, true)
if !strings.Contains(sshCmdOutput, expectedStr) {

View File

@ -28,7 +28,8 @@ import (
)
func testClusterStatus(t *testing.T) {
kr := util.NewKubectlRunner(t)
p := "minikube"
kr := util.NewKubectlRunner(t, p)
cs := api.ComponentStatusList{}
healthy := func() error {

View File

@ -27,7 +27,8 @@ import (
)
func TestDocker(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
if usingNoneDriver(mk) {
t.Skip("skipping test as none driver does not bundle docker")
}

View File

@ -38,9 +38,9 @@ var mountArgs = flag.String("minikube-mount-args", "", "Arguments to pass to min
var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")
// NewMinikubeRunner creates a new MinikubeRunner
func NewMinikubeRunner(t *testing.T, extraArgs ...string) util.MinikubeRunner {
func NewMinikubeRunner(t *testing.T, profile string, extraArgs ...string) util.MinikubeRunner {
return util.MinikubeRunner{
Profile: "minikube",
Profile: profile,
BinaryPath: *binaryPath,
StartArgs: *startArgs + " " + strings.Join(extraArgs, " "),
GlobalArgs: *globalArgs,

View File

@ -27,7 +27,8 @@ import (
)
func TestFunctional(t *testing.T) {
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
mk.EnsureRunning()
// This one is not parallel, and ensures the cluster comes up
// before we run any other tests.
@ -51,7 +52,8 @@ func TestFunctional(t *testing.T) {
}
func TestFunctionalContainerd(t *testing.T) {
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
if usingNoneDriver(mk) {
t.Skip("Can't run containerd backend with none driver")

View File

@ -25,8 +25,8 @@ import (
)
func TestISO(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
mk.Start()
@ -37,7 +37,8 @@ func TestISO(t *testing.T) {
}
func testMountPermissions(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
// test mount permissions
mountPoints := []string{"/Users", "/hosthome"}
perms := "drwxr-xr-x"
@ -59,7 +60,8 @@ func testMountPermissions(t *testing.T) {
}
func testPackages(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
packages := []string{
"git",
@ -81,7 +83,8 @@ func testPackages(t *testing.T) {
}
func testPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
for _, dir := range []string{
"/data",

View File

@ -43,7 +43,8 @@ func testMounting(t *testing.T) {
}
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
tempDir, err := ioutil.TempDir("", "mounttest")
if err != nil {
@ -60,7 +61,7 @@ func testMounting(t *testing.T) {
}
}()
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
podName := "busybox-mount"
curdir, err := filepath.Abs("")
if err != nil {

View File

@ -29,13 +29,14 @@ import (
)
func TestPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
if usingNoneDriver(mk) {
t.Skip("skipping test as none driver does not support persistence")
}
mk.EnsureRunning()
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
@ -48,7 +49,7 @@ func TestPersistence(t *testing.T) {
}
verify := func(t *testing.T) {
if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
if err := util.WaitForBusyboxRunning(t, "default", p); err != nil {
t.Fatalf("waiting for busybox to be up: %v", err)
}

View File

@ -26,10 +26,10 @@ import (
// testProfileList tests the `minikube profile list` command
func testProfileList(t *testing.T) {
t.Parallel()
profile := "minikube"
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
out := mk.RunCommand("profile list", true)
if !strings.Contains(out, profile) {
t.Errorf("Error , failed to read profile name (%s) in `profile list` command output : \n %q ", profile, out)
if !strings.Contains(out, p) {
t.Errorf("Error , failed to read profile name (%s) in `profile list` command output : \n %q ", p, out)
}
}

View File

@ -72,7 +72,9 @@ func TestProxy(t *testing.T) {
}
// making sure there is no running minikube to avoid https://github.com/kubernetes/minikube/issues/4132
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
_, _, err = mk.RunWithContext(ctx, "delete")
@ -109,7 +111,8 @@ func TestProxy(t *testing.T) {
// testProxyWarning checks user is warned correctly about the proxy related env vars
func testProxyWarning(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
startCmd := fmt.Sprintf("start %s %s", mk.StartArgs, mk.GlobalArgs)
@ -131,7 +134,8 @@ func testProxyWarning(t *testing.T) {
// testProxyDashboard checks if dashboard URL is accessible if proxy is set
func testProxyDashboard(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
cmd, out := mk.RunDaemon("dashboard --url")
defer func() {
err := cmd.Process.Kill()

View File

@ -39,8 +39,9 @@ var (
)
func testProvisioning(t *testing.T) {
p := "minikube"
t.Parallel()
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
defer func() {
if out, err := kr.RunCommand([]string{"delete", "pvc", pvcName}); err != nil {

View File

@ -65,7 +65,8 @@ func TestStartStop(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
mk := NewMinikubeRunner(t)
p := "minikube"
mk := NewMinikubeRunner(t, p)
if !strings.Contains(test.name, "docker") && usingNoneDriver(mk) {
t.Skipf("skipping %s - incompatible with none driver", test.name)
}
@ -83,7 +84,7 @@ func TestStartStop(t *testing.T) {
}
// check for the current-context before and after the stop
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
currentContext, err := kr.RunCommand([]string{"config", "current-context"})
if err != nil {
t.Fatalf("Failed to fetch current-context")

View File

@ -46,7 +46,8 @@ func testTunnel(t *testing.T) {
}
t.Log("starting tunnel test...")
mk := NewMinikubeRunner(t, "--wait=false")
p := "minikube"
mk := NewMinikubeRunner(t, p, "--wait=false")
go func() {
output := mk.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
if t.Failed() {
@ -60,7 +61,7 @@ func testTunnel(t *testing.T) {
t.Fatal(errors.Wrap(err, "cleaning up tunnels"))
}
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
t.Log("deploying nginx...")
curdir, err := filepath.Abs("")

View File

@ -27,8 +27,8 @@ import (
)
// WaitForBusyboxRunning waits until busybox pod to be running
func WaitForBusyboxRunning(t *testing.T, namespace string) error {
client, err := commonutil.GetClient()
func WaitForBusyboxRunning(t *testing.T, namespace string, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -37,8 +37,8 @@ func WaitForBusyboxRunning(t *testing.T, namespace string) error {
}
// WaitForIngressControllerRunning waits until ingress controller pod to be running
func WaitForIngressControllerRunning(t *testing.T) error {
client, err := commonutil.GetClient()
func WaitForIngressControllerRunning(t *testing.T, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -56,8 +56,8 @@ func WaitForIngressControllerRunning(t *testing.T) error {
}
// WaitForIngressDefaultBackendRunning waits until ingress default backend pod to be running
func WaitForIngressDefaultBackendRunning(t *testing.T) error {
client, err := commonutil.GetClient()
func WaitForIngressDefaultBackendRunning(t *testing.T, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -78,8 +78,8 @@ func WaitForIngressDefaultBackendRunning(t *testing.T) error {
}
// WaitForGvisorControllerRunning waits for the gvisor controller pod to be running
func WaitForGvisorControllerRunning(t *testing.T) error {
client, err := commonutil.GetClient()
func WaitForGvisorControllerRunning(t *testing.T, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -92,8 +92,8 @@ func WaitForGvisorControllerRunning(t *testing.T) error {
}
// WaitForGvisorControllerDeleted waits for the gvisor controller pod to be deleted
func WaitForGvisorControllerDeleted() error {
client, err := commonutil.GetClient()
func WaitForGvisorControllerDeleted(miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -106,8 +106,8 @@ func WaitForGvisorControllerDeleted() error {
}
// WaitForUntrustedNginxRunning waits for the untrusted nginx pod to start running
func WaitForUntrustedNginxRunning() error {
client, err := commonutil.GetClient()
func WaitForUntrustedNginxRunning(miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -120,8 +120,8 @@ func WaitForUntrustedNginxRunning() error {
}
// WaitForFailedCreatePodSandBoxEvent waits for a FailedCreatePodSandBox event to appear
func WaitForFailedCreatePodSandBoxEvent() error {
client, err := commonutil.GetClient()
func WaitForFailedCreatePodSandBoxEvent(miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
@ -132,8 +132,8 @@ func WaitForFailedCreatePodSandBoxEvent() error {
}
// WaitForNginxRunning waits for nginx service to be up
func WaitForNginxRunning(t *testing.T) error {
client, err := commonutil.GetClient()
func WaitForNginxRunning(t *testing.T, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")

View File

@ -38,12 +38,12 @@ type KubectlRunner struct {
}
// NewKubectlRunner creates a new KubectlRunner
func NewKubectlRunner(t *testing.T) *KubectlRunner {
func NewKubectlRunner(t *testing.T, profile string) *KubectlRunner {
p, err := exec.LookPath(kubectlBinary)
if err != nil {
t.Fatalf("Couldn't find kubectl on path.")
}
return &KubectlRunner{Profile: "minikube", BinaryPath: p, T: t}
return &KubectlRunner{Profile: profile, BinaryPath: p, T: t}
}
// RunCommandParseOutput runs a command and parses the JSON output

View File

@ -181,9 +181,7 @@ func (m *MinikubeRunner) RunDaemon2(cmdStr string) (*exec.Cmd, *bufio.Reader, *b
// SSH returns the output of running a command using SSH
func (m *MinikubeRunner) SSH(cmdStr string) (string, error) {
profileArg := fmt.Sprintf(" -p=%s", m.Profile)
cmdStr += profileArg
profileArg := fmt.Sprintf("-p=%s", m.Profile)
path, _ := filepath.Abs(m.BinaryPath)
cmd := exec.Command(path, profileArg, "ssh", cmdStr)

View File

@ -66,7 +66,8 @@ func downloadMinikubeBinary(version string) (*os.File, error) {
// the odlest supported k8s version and then runs the current head minikube
// and it tries to upgrade from the older supported k8s to news supported k8s
func TestVersionUpgrade(t *testing.T) {
mkCurrent := NewMinikubeRunner(t)
p := "minikube"
mkCurrent := NewMinikubeRunner(t, p)
mkCurrent.RunCommand("delete", true)
mkCurrent.CheckStatus(state.None.String())
tf, err := downloadMinikubeBinary("latest")
@ -75,7 +76,7 @@ func TestVersionUpgrade(t *testing.T) {
}
defer os.Remove(tf.Name())
mkRelease := NewMinikubeRunner(t)
mkRelease := NewMinikubeRunner(t, p)
mkRelease.BinaryPath = tf.Name()
// For full coverage: also test upgrading from oldest to newest supported k8s release
mkRelease.Start(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))