Merge branch 'master' of github.com:kubernetes/minikube into create-node

pull/8095/head
Sharif Elgamal 2020-05-15 09:16:31 -07:00
commit 9df04324a2
7 changed files with 147 additions and 89 deletions

View File

@@ -379,7 +379,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
if client.Major != cluster.Major || minorSkew > 1 {
out.Ln("")
out.WarningT("{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.",
out.V{"path": path, "client_version": client, "cluster_version": cluster})
out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
out.V{"path": path, "client_version": client})
@@ -843,7 +843,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
if version.GTE(semver.MustParse("1.18.0-beta.1")) {
if _, err := exec.LookPath("conntrack"); err != nil {
exit.WithCodeT(exit.Config, "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
exit.WithCodeT(exit.Config, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
}
}
}
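
For context, the conntrack gate above is an ordinary $PATH lookup. A minimal standalone sketch of the same check (plain Go, not minikube's code):

package main

import (
	"fmt"
	"os/exec"
)

// exec.LookPath searches $PATH for an executable and returns an error when
// it is absent, which is the condition that triggers exit.WithCodeT above.
func main() {
	if path, err := exec.LookPath("conntrack"); err != nil {
		fmt.Println("conntrack not found in PATH:", err)
	} else {
		fmt.Println("conntrack found at", path)
	}
}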
@@ -1009,26 +1009,26 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
}
suggestedName := old.Name + "2"
out.T(out.Conflict, "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}", out.V{"new": nvs, "old": ovs, "profile": profileArg})
out.T(out.Conflict, "You have selected Kubernetes {{.new}}, but the existing cluster is running Kubernetes {{.old}}", out.V{"new": nvs, "old": ovs, "profile": profileArg})
exit.WithCodeT(exit.Config, `Non-destructive downgrades are not supported, but you can proceed with one of the following options:
1) Recreate the cluster with Kubernetes v{{.new}}, by running:
1) Recreate the cluster with Kubernetes {{.new}}, by running:
minikube delete{{.profile}}
minikube start{{.profile}} --kubernetes-version={{.new}}
minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}
2) Create a second cluster with Kubernetes v{{.new}}, by running:
2) Create a second cluster with Kubernetes {{.new}}, by running:
minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}
minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}
3) Use the existing cluster at version Kubernetes v{{.old}}, by running:
3) Use the existing cluster at version Kubernetes {{.old}}, by running:
minikube start{{.profile}} --kubernetes-version={{.old}}
`, out.V{"new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}
`, out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
}
if defaultVersion.GT(nvs) {
out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}", out.V{"new": defaultVersion})
out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
}
return nv
}
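
This hunk threads a new {{.prefix}} variable through the messages so the suggested --kubernetes-version value carries the leading "v" from version.VersionPrefix. A rough standalone rendering of one such tip using the standard library, assuming out.V behaves like a plain map of template variables ("v" and "1.18.2" are illustrative values, not taken from the code):

package main

import (
	"os"
	"text/template"
)

// Fill the {{.prefix}} and {{.new}} placeholders from a variable map, the
// way the templated tips above are rendered.
func main() {
	msg := "minikube start --kubernetes-version={{.prefix}}{{.new}}\n"
	vars := map[string]interface{}{"prefix": "v", "new": "1.18.2"}
	tmpl := template.Must(template.New("tip").Parse(msg))
	if err := tmpl.Execute(os.Stdout, vars); err != nil {
		panic(err)
	}
	// prints: minikube start --kubernetes-version=v1.18.2
}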

View File

@@ -353,12 +353,10 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
// updateExistingConfigFromFlags will update the existing config from the flags - used on a second start
// skipping updating existing docker env, docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips
func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to supress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)
func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to suppress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)
validateFlags(cmd, existing.Driver)
// Make a copy of existing to avoid making changes to the struct passed by reference
cc := *existing
if cmd.Flags().Changed(containerRuntime) {
cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
}
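
The cc := *existing line above carries the intent of this hunk: dereferencing copies the struct, so flag-driven updates land on the copy rather than on the caller's config. A toy illustration with simplified types (not minikube's):

package main

import "fmt"

type ClusterConfig struct{ ContainerRuntime string }

// updateFromFlags mimics the copy-then-mutate pattern: the shallow copy
// shields the caller's struct, though nested pointers, maps, and slices
// would still be shared.
func updateFromFlags(existing *ClusterConfig, runtime string) ClusterConfig {
	cc := *existing
	if runtime != "" { // stands in for cmd.Flags().Changed(containerRuntime)
		cc.ContainerRuntime = runtime
	}
	return cc
}

func main() {
	orig := &ClusterConfig{ContainerRuntime: "docker"}
	updated := updateFromFlags(orig, "containerd")
	fmt.Println(orig.ContainerRuntime, updated.ContainerRuntime) // docker containerd
}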

View File

@@ -33,7 +33,7 @@ type rewrite struct {
// outputs possible drivers for the operating system
func rewriteFlags(command *cobra.Command) error {
rewrites := map[string][]rewrite{
"start": []rewrite{{
"start": {{
flag: "driver",
usage: "Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system.",
}, {
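
The change above is the standard composite-literal simplification (what gofmt -s enforces): when the element type is implied by the map's value type, the inner []rewrite can be dropped. A minimal self-contained demo with toy fields:

package main

import "fmt"

type rewrite struct{ flag, usage string }

func main() {
	rewrites := map[string][]rewrite{
		// the []rewrite element type is implied, so "start": {{...}} suffices
		"start": {{flag: "driver", usage: "driver to run Kubernetes in"}},
	}
	fmt.Println(rewrites["start"][0].flag) // driver
}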

View File

@@ -31,7 +31,7 @@ const (
SystemPodsWaitKey = "system_pods"
// DefaultSAWaitKey is the name used in the flags for default service account
DefaultSAWaitKey = "default_sa"
// AppsRunning is the name used in the flags for waiting for k8s-apps to be running
// AppsRunningKey is the name used in the flags for waiting for k8s-apps to be running
AppsRunningKey = "apps_running"
// NodeReadyKey is the name used in the flags for waiting for the node status to be ready
NodeReadyKey = "node_ready"
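
These constants are the component names accepted by --wait, e.g. --wait=apiserver,system_pods,default_sa as used in the test file below. A toy sketch of consuming such a list (not minikube's actual flag parsing):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// mirrors the *WaitKey constants above
	known := map[string]bool{
		"apiserver": true, "system_pods": true, "default_sa": true,
		"apps_running": true, "node_ready": true,
	}
	for _, k := range strings.Split("apiserver,system_pods,default_sa", ",") {
		fmt.Println(k, "recognized:", known[k])
	}
}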

View File

@@ -51,7 +51,7 @@ const (
DefaultServiceCIDR = "10.96.0.0/12"
// HostAlias is a DNS alias to the container/VM host IP
HostAlias = "host.minikube.internal"
// ControlPaneAlias is a DNS alias pointing to the apiserver frontend
// ControlPlaneAlias is a DNS alias pointing to the apiserver frontend
ControlPlaneAlias = "control-plane.minikube.internal"
// DockerHostEnv is used for docker daemon settings

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/minikube/pkg/minikube/constants"
)
// Binary holds a minikube binary
type Binary struct {
path string
pr int

View File

@@ -80,15 +80,14 @@ func TestStartStop(t *testing.T) {
tc := tc
t.Run(tc.name, func(t *testing.T) {
MaybeParallel(t)
profile := UniqueProfileName(tc.name)
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
defer Cleanup(t, profile, cancel)
type validateStartStopFunc func(context.Context, *testing.T, string, string, string, []string)
if !strings.Contains(tc.name, "docker") && NoneDriver() {
t.Skipf("skipping %s - incompatible with none driver", t.Name())
}
profile := UniqueProfileName(tc.name)
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
defer CleanupWithLogs(t, profile, cancel)
waitFlag := "--wait=true"
if strings.Contains(tc.name, "cni") { // wait=app_running is broken for CNI https://github.com/kubernetes/minikube/issues/7354
waitFlag = "--wait=apiserver,system_pods,default_sa"
@@ -98,82 +97,142 @@ func TestStartStop(t *testing.T) {
startArgs = append(startArgs, StartArgs()...)
startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version))
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
}
if !strings.Contains(tc.name, "cni") {
testPodScheduling(ctx, t, profile)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
if err != nil {
t.Fatalf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err)
}
// The none driver never really stops
if !NoneDriver() {
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Stopped.String() {
t.Fatalf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
t.Run("serial", func(t *testing.T) {
serialTests := []struct {
name string
validator validateStartStopFunc
}{
{"FirstStart", validateFirstStart},
{"DeployApp", validateDeploying},
{"Stop", validateStop},
{"EnableAddonAfterStop", validateEnableAddonAfterStop},
{"SecondStart", validateSecondStart},
{"UserAppExistsAfterStop", validateAppExistsAfterStop},
{"AddonExistsAfterStop", validateAddonAfterStop},
{"VerifyKubernetesImages", validateKubernetesImages},
{"Pause", validatePauseAfterSart},
}
}
// Enable an addon to assert it comes up afterwards
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Fatalf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
// Explicit fatal so that failures don't move directly to deletion
t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
}
if strings.Contains(tc.name, "cni") {
t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
} else {
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(7)); err != nil {
t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err)
}
if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil {
t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
}
}
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Running.String() {
t.Fatalf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
}
if !NoneDriver() {
testPulledImages(ctx, t, profile, tc.version)
}
testPause(ctx, t, profile)
if *cleanup {
// Normally handled by cleanuprofile, but not fatal there
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
if err != nil {
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
for _, stc := range serialTests {
tcName := tc.name
tcVersion := tc.version
stc := stc
t.Run(stc.name, func(t *testing.T) {
stc.validator(ctx, t, profile, tcName, tcVersion, startArgs)
})
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
if err != nil {
t.Logf("config context error: %v (may be ok)", err)
if *cleanup {
// Normally handled by cleanuprofile, but not fatal there
rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
if err != nil {
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
if err != nil {
t.Logf("config context error: %v (may be ok)", err)
}
if rr.ExitCode != 1 {
t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
}
}
if rr.ExitCode != 1 {
t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
}
}
})
})
}
})
}
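One detail worth noting in the restructured test above: the loop copies stc and the tc fields into locals before calling t.Run, so each subtest closure captures its own values. A standalone sketch of the pattern (test names here are illustrative):

package example

import "testing"

// Before Go 1.22 a range variable is reused across iterations, so a closure
// handed to t.Run must pin its own copy (tc := tc, stc := stc above);
// otherwise a closure that outlives the iteration, as with t.Parallel,
// would observe a later iteration's values.
func TestCapturePattern(t *testing.T) {
	tests := []struct{ name string }{{name: "FirstStart"}, {name: "Stop"}}
	for _, tc := range tests {
		tc := tc // pin this iteration's value for the closure below
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			t.Log(tc.name)
		})
	}
}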
func validateFirstStart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
}
}
func validateDeploying(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
if !strings.Contains(tcName, "cni") {
testPodScheduling(ctx, t, profile)
}
}
func validateStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
rr, err := Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
if err != nil {
t.Fatalf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err)
}
}
func validateEnableAddonAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
// The none driver never really stops
if !NoneDriver() {
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Stopped.String() {
t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
}
}
// Enable an addon to assert it comes up afterwards
rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
}
}
func validateSecondStart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
// Explicit fatal so that failures don't move directly to deletion
t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
}
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Running.String() {
t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
}
}
// validateAppExistsAfterStop verifies that a user's app will not vanish after a minikube stop
func validateAppExistsAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
if strings.Contains(tcName, "cni") {
t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
} else if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(7)); err != nil {
t.Errorf("failed waiting for pod 'busybox' post-stop-start: %v", err)
}
}
// validateAddonAfterStop validates that an addon which was enabled when minikube was stopped is still enabled and working after restart.
func validateAddonAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
if strings.Contains(tcName, "cni") {
t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
} else if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil {
t.Errorf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
}
}
func validateKubernetesImages(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
if !NoneDriver() {
testPulledImages(ctx, t, profile, tcVersion)
}
}
func validatePauseAfterStart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
defer PostMortemLogs(t, profile)
testPause(ctx, t, profile)
}
// testPodScheduling asserts that this configuration can schedule new pods
func testPodScheduling(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)