Merge branch 'master' of github.com:kubernetes/minikube into create-node
commit 9df04324a2
@@ -379,7 +379,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
 	if client.Major != cluster.Major || minorSkew > 1 {
 		out.Ln("")
-		out.WarningT("{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
+		out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.",
 			out.V{"path": path, "client_version": client, "cluster_version": cluster})
 		out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
 			out.V{"path": path, "client_version": client})
@@ -843,7 +843,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 	version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
 	if version.GTE(semver.MustParse("1.18.0-beta.1")) {
 		if _, err := exec.LookPath("conntrack"); err != nil {
-			exit.WithCodeT(exit.Config, "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
+			exit.WithCodeT(exit.Config, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
 		}
 	}
 }
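
Aside: the version gate above works because semver ordering places a release after its own pre-releases, so everything from the first 1.18 beta onward trips the conntrack requirement. A small illustrative sketch using the blang/semver library the hunk's semver package refers to; this is not part of the commit:

    package main

    import (
        "fmt"

        "github.com/blang/semver"
    )

    func main() {
        // The first 1.18 pre-release is the threshold used in the hunk above.
        threshold := semver.MustParse("1.18.0-beta.1")
        for _, s := range []string{"1.17.3", "1.18.0-beta.1", "1.18.2"} {
            fmt.Printf("%s requires conntrack: %v\n", s, semver.MustParse(s).GTE(threshold))
        }
        // Prints false, true, true: pre-releases sort before the release,
        // so 1.18.2 >= 1.18.0-beta.1 holds.
    }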
@@ -1009,26 +1009,26 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
 		}
 
 		suggestedName := old.Name + "2"
-		out.T(out.Conflict, "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}", out.V{"new": nvs, "old": ovs, "profile": profileArg})
+		out.T(out.Conflict, "You have selected Kubernetes {{.new}}, but the existing cluster is running Kubernetes {{.old}}", out.V{"new": nvs, "old": ovs, "profile": profileArg})
 		exit.WithCodeT(exit.Config, `Non-destructive downgrades are not supported, but you can proceed with one of the following options:
 
-1) Recreate the cluster with Kubernetes v{{.new}}, by running:
+1) Recreate the cluster with Kubernetes {{.new}}, by running:
 
   minikube delete{{.profile}}
-  minikube start{{.profile}} --kubernetes-version={{.new}}
+  minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}
 
-2) Create a second cluster with Kubernetes v{{.new}}, by running:
+2) Create a second cluster with Kubernetes {{.new}}, by running:
 
-  minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}
+  minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}
 
-3) Use the existing cluster at version Kubernetes v{{.old}}, by running:
+3) Use the existing cluster at version Kubernetes {{.old}}, by running:
 
-  minikube start{{.profile}} --kubernetes-version={{.old}}
-`, out.V{"new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
+  minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}
+`, out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
 
 	}
 	if defaultVersion.GT(nvs) {
-		out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}", out.V{"new": defaultVersion})
+		out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
 	}
 	return nv
 }
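
Note on the {{.prefix}} additions: nvs and ovs are semver values whose String() carries no leading "v", so the suggested --kubernetes-version flags used to render without the prefix minikube expects; threading version.VersionPrefix through the template restores it. A minimal stand-alone sketch of how the template composes (the literal "v" and "1.18.0" below stand in for version.VersionPrefix and nvs, and are not from the commit):

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // "v" stands in for minikube's version.VersionPrefix; "1.18.0" for
        // nvs.String(), which semver renders without any "v".
        hint := template.Must(template.New("hint").Parse(
            "minikube start --kubernetes-version={{.prefix}}{{.new}}\n"))
        _ = hint.Execute(os.Stdout, map[string]string{"prefix": "v", "new": "1.18.0"})
        // Output: minikube start --kubernetes-version=v1.18.0
    }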
@@ -353,12 +353,10 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 
 // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start
 // skipping updating existing docker env , docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips
-func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to supress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)
+func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to suppress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)
 	validateFlags(cmd, existing.Driver)
-
-	// Make a copy of existing to avoid making changes to the struct passed by reference
 	cc := *existing
 
 	if cmd.Flags().Changed(containerRuntime) {
 		cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
 	}
@@ -33,7 +33,7 @@ type rewrite struct {
 // outputs possible drivers for the operating system
 func rewriteFlags(command *cobra.Command) error {
 	rewrites := map[string][]rewrite{
-		"start": []rewrite{{
+		"start": {{
 			flag:  "driver",
 			usage: "Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system.",
 		}, {
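
Aside: the `"start": {{` change is the composite-literal simplification that gofmt -s enforces: since the map's value type is []rewrite, the inner []rewrite may be elided from each element. An illustrative, self-contained sketch mirroring the struct in the hunk (the usage string is abbreviated):

    package main

    import "fmt"

    // rewrite mirrors the shape used in the hunk: a flag plus replacement usage text.
    type rewrite struct {
        flag  string
        usage string
    }

    func main() {
        // Equivalent to "start": []rewrite{{...}}: the element type is implied
        // by the map's declared value type, which is the simplification applied above.
        rewrites := map[string][]rewrite{
            "start": {{flag: "driver", usage: "specify the driver"}},
        }
        fmt.Println(rewrites["start"][0].flag) // driver
    }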
@@ -31,7 +31,7 @@ const (
 	SystemPodsWaitKey = "system_pods"
 	// DefaultSAWaitKey is the name used in the flags for default service account
 	DefaultSAWaitKey = "default_sa"
-	// AppsRunning is the name used in the flags for waiting for k8s-apps to be running
+	// AppsRunningKey is the name used in the flags for waiting for k8s-apps to be running
 	AppsRunningKey = "apps_running"
 	// NodeReadyKey is the name used in the flags for waiting for the node status to be ready
 	NodeReadyKey = "node_ready"
@@ -51,7 +51,7 @@ const (
 	DefaultServiceCIDR = "10.96.0.0/12"
 	// HostAlias is a DNS alias to the the container/VM host IP
 	HostAlias = "host.minikube.internal"
-	// ControlPaneAlias is a DNS alias pointing to the apiserver frontend
+	// ControlPlaneAlias is a DNS alias pointing to the apiserver frontend
 	ControlPlaneAlias = "control-plane.minikube.internal"
 
 	// DockerHostEnv is used for docker daemon settings
@@ -29,6 +29,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/constants"
 )
 
+// Binary holds a minikube binary
 type Binary struct {
 	path string
 	pr   int
@@ -80,15 +80,14 @@ func TestStartStop(t *testing.T) {
 			tc := tc
 			t.Run(tc.name, func(t *testing.T) {
 				MaybeParallel(t)
+				profile := UniqueProfileName(tc.name)
+				ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
+				defer Cleanup(t, profile, cancel)
+				type validateStartStopFunc func(context.Context, *testing.T, string, string, string, []string)
 				if !strings.Contains(tc.name, "docker") && NoneDriver() {
 					t.Skipf("skipping %s - incompatible with none driver", t.Name())
 				}
-
-				profile := UniqueProfileName(tc.name)
-				ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
-				defer CleanupWithLogs(t, profile, cancel)
 
 				waitFlag := "--wait=true"
 				if strings.Contains(tc.name, "cni") { // wait=app_running is broken for CNI https://github.com/kubernetes/minikube/issues/7354
 					waitFlag = "--wait=apiserver,system_pods,default_sa"
@@ -98,82 +97,142 @@ func TestStartStop(t *testing.T) {
 				startArgs = append(startArgs, StartArgs()...)
 				startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version))
 
-				rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
-				if err != nil {
-					t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
-				}
-
-				if !strings.Contains(tc.name, "cni") {
-					testPodScheduling(ctx, t, profile)
-				}
-
-				rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
-				if err != nil {
-					t.Fatalf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err)
-				}
-
-				// The none driver never really stops
-				if !NoneDriver() {
-					got := Status(ctx, t, Target(), profile, "Host")
-					if got != state.Stopped.String() {
-						t.Fatalf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
-					}
-				}
-
-				// Enable an addon to assert it comes up afterwards
-				rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
-				if err != nil {
-					t.Fatalf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
-				}
-
-				rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
-				if err != nil {
-					// Explicit fatal so that failures don't move directly to deletion
-					t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
-				}
-
-				if strings.Contains(tc.name, "cni") {
-					t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
-				} else {
-					if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(7)); err != nil {
-						t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err)
-					}
-					if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil {
-						t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
-					}
-				}
-
-				got := Status(ctx, t, Target(), profile, "Host")
-				if got != state.Running.String() {
-					t.Fatalf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
-				}
-
-				if !NoneDriver() {
-					testPulledImages(ctx, t, profile, tc.version)
-				}
-
-				testPause(ctx, t, profile)
-
-				if *cleanup {
-					// Normally handled by cleanuprofile, but not fatal there
-					rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
-					if err != nil {
-						t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
-					}
-
-					rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
-					if err != nil {
-						t.Logf("config context error: %v (may be ok)", err)
-					}
-					if rr.ExitCode != 1 {
-						t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
-					}
-				}
+				t.Run("serial", func(t *testing.T) {
+					serialTests := []struct {
+						name      string
+						validator validateStartStopFunc
+					}{
+						{"FirstStart", validateFirstStart},
+						{"DeployApp", validateDeploying},
+						{"Stop", validateStop},
+						{"EnableAddonAfterStop", validateEnableAddonAfterStop},
+						{"SecondStart", validateSecondStart},
+						{"UserAppExistsAfterStop", validateAppExistsAfterStop},
+						{"AddonExistsAfterStop", validateAddonAfterStop},
+						{"VerifyKubernetesImages", validateKubernetesImages},
+						{"Pause", validatePauseAfterSart},
+					}
+					for _, stc := range serialTests {
+						tcName := tc.name
+						tcVersion := tc.version
+						stc := stc
+						t.Run(stc.name, func(t *testing.T) {
+							stc.validator(ctx, t, profile, tcName, tcVersion, startArgs)
+						})
+					}
+
+					if *cleanup {
+						// Normally handled by cleanuprofile, but not fatal there
+						rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
+						if err != nil {
+							t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
+						}
+
+						rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
+						if err != nil {
+							t.Logf("config context error: %v (may be ok)", err)
+						}
+						if rr.ExitCode != 1 {
+							t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
+						}
+					}
+				})
 			})
 		}
 	})
 }
 
+func validateFirstStart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err != nil {
+		t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
+	}
+}
+
+func validateDeploying(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	if !strings.Contains(tcName, "cni") {
+		testPodScheduling(ctx, t, profile)
+	}
+}
+
+func validateStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
+	if err != nil {
+		t.Fatalf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err)
+	}
+}
+
+func validateEnableAddonAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	// The none driver never really stops
+	if !NoneDriver() {
+		got := Status(ctx, t, Target(), profile, "Host")
+		if got != state.Stopped.String() {
+			t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
+		}
+	}
+
+	// Enable an addon to assert it comes up afterwards
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
+	if err != nil {
+		t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
+	}
+
+}
+
+func validateSecondStart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err != nil {
+		// Explicit fatal so that failures don't move directly to deletion
+		t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
+	}
+
+	got := Status(ctx, t, Target(), profile, "Host")
+	if got != state.Running.String() {
+		t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
+	}
+
+}
+
+// validateAppExistsAfterStop verifies that a user's app will not vanish after a minikube stop
+func validateAppExistsAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	if strings.Contains(tcName, "cni") {
+		t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
+	} else if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil {
+		t.Errorf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
+	}
+
+}
+
+// validateAddonAfterStop validates that an addon which was enabled when minikube is stopped will be enabled and working..
+func validateAddonAfterStop(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	if strings.Contains(tcName, "cni") {
+		t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
+	} else if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil {
+		t.Errorf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
+	}
+}
+
+func validateKubernetesImages(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	if !NoneDriver() {
+		testPulledImages(ctx, t, profile, tcVersion)
+	}
+}
+
+func validatePauseAfterSart(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
+	defer PostMortemLogs(t, profile)
+	testPause(ctx, t, profile)
+}
+
 // testPodScheduling asserts that this configuration can schedule new pods
 func testPodScheduling(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)
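The refactor above is Go's table-driven subtest pattern: the monolithic start/stop/start body becomes an ordered slice of named validators, each run via t.Run, so failures are reported per named step and post-mortem logs attach to the step that failed. A stripped-down sketch of the pattern (names are illustrative, not from the commit):

    package demo

    import "testing"

    // TestSequence runs order-dependent steps as named subtests, mirroring
    // the serialTests table in the diff above. Without t.Parallel, subtests
    // of the same parent run sequentially, so step order is preserved.
    func TestSequence(t *testing.T) {
        steps := []struct {
            name string
            fn   func(*testing.T)
        }{
            {"Start", func(t *testing.T) { /* bring the system up */ }},
            {"Stop", func(t *testing.T) { /* tear it down */ }},
        }
        for _, step := range steps {
            step := step // capture the range variable, as the diff does with stc := stc
            t.Run(step.name, step.fn)
        }
    }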