Merge branch 'master' of https://github.com/kubernetes/minikube into refactor-start-stop
commit 7c2dfef1fe
@@ -740,7 +740,11 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
 		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
 	}
 
-	id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
+	id := st.Reason
+	if id == "" {
+		id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
+	}
+
 	code := reason.ExProviderUnavailable
 
 	if !st.Running {
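Note on the hunk above: a driver-supplied `st.Reason` now takes precedence, and the generic `PROVIDER_<NAME>_ERROR` id is kept only as a fallback. A minimal standalone sketch of that fallback logic (the `driverState` type here is illustrative, not minikube's actual struct):

package main

import (
    "fmt"
    "strings"
)

// driverState is a stand-in for the relevant fields of registry.State.
type driverState struct {
    Reason string
    Error  error
}

// reasonID prefers the driver-supplied reason and falls back to a
// generic PROVIDER_<NAME>_ERROR identifier, mirroring the hunk above.
func reasonID(name string, st driverState) string {
    id := st.Reason
    if id == "" {
        id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
    }
    return id
}

func main() {
    fmt.Println(reasonID("docker", driverState{Reason: "PROVIDER_DOCKER_NOT_RUNNING"})) // PROVIDER_DOCKER_NOT_RUNNING
    fmt.Println(reasonID("hyperkit", driverState{}))                                    // PROVIDER_HYPERKIT_ERROR
}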
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -27,6 +27,7 @@ apiServer:
     feature-gates: "a=b"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     feature-gates: "a=b"
     kube-api-burst: "32"
     leader-elect: "false"
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -25,6 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -41,7 +42,7 @@ etcd:
     proxy-refresh-interval: "70000"
 kubernetesVersion: v1.19.0
 networking:
-  dnsDomain: 1.1.1.1
+  dnsDomain: minikube.local
   podSubnet: "10.244.0.0/16"
   serviceSubnet: 10.96.0.0/12
 ---
@@ -51,7 +52,7 @@ authentication:
   x509:
     clientCAFile: /var/lib/minikube/certs/ca.crt
 cgroupDriver: systemd
-clusterDomain: "cluster.local"
+clusterDomain: "minikube.local"
 # disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
@@ -26,6 +26,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     leader-elect: "false"
 scheduler:
   extraArgs:
@@ -26,6 +26,7 @@ apiServer:
     fail-no-swap: "true"
 controllerManager:
   extraArgs:
+    allocate-node-cidrs: "true"
     kube-api-burst: "32"
     leader-elect: "false"
 scheduler:
@@ -27,10 +27,10 @@ import (
 
 const (
 	// DefaultKubernetesVersion is the default Kubernetes version
-	DefaultKubernetesVersion = "v1.20.0"
+	DefaultKubernetesVersion = "v1.20.2"
 	// NewestKubernetesVersion is the newest Kubernetes version to test against
 	// NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go
-	NewestKubernetesVersion = "v1.20.0"
+	NewestKubernetesVersion = "v1.20.3-rc.0"
 	// OldestKubernetesVersion is the oldest Kubernetes version to test against
 	OldestKubernetesVersion = "v1.13.0"
 	// DefaultClusterName is the default nane for the k8s cluster
@@ -95,8 +95,10 @@ func status() registry.State {
 	cmd := exec.CommandContext(ctx, oci.Docker, "version", "--format", "{{.Server.Os}}-{{.Server.Version}}")
 	o, err := cmd.Output()
 	if err != nil {
+		reason := ""
 		if ctx.Err() == context.DeadlineExceeded {
 			err = errors.Wrapf(err, "deadline exceeded running %q", strings.Join(cmd.Args, " "))
+			reason = "PROVIDER_DOCKER_DEADLINE_EXCEEDED"
 		}
 
 		klog.Warningf("docker version returned error: %v", err)
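For the deadline branch above, a minimal sketch of detecting a timed-out `docker version` call via `context.DeadlineExceeded` and tagging it with a reason code (the command and timeout here are illustrative):

package main

import (
    "context"
    "fmt"
    "os/exec"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    cmd := exec.CommandContext(ctx, "docker", "version", "--format", "{{.Server.Os}}-{{.Server.Version}}")
    out, err := cmd.Output()

    reason := ""
    if err != nil && ctx.Err() == context.DeadlineExceeded {
        // The command did not finish within the timeout.
        reason = "PROVIDER_DOCKER_DEADLINE_EXCEEDED"
    }
    fmt.Printf("output=%q err=%v reason=%q\n", out, err, reason)
}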
@@ -104,26 +106,25 @@ func status() registry.State {
 		if exitErr, ok := err.(*exec.ExitError); ok {
 			stderr := strings.TrimSpace(string(exitErr.Stderr))
 			newErr := fmt.Errorf(`%q %v: %s`, strings.Join(cmd.Args, " "), exitErr, stderr)
-			return suggestFix(stderr, newErr)
+			return suggestFix("version", exitErr.ExitCode(), stderr, newErr)
 		}
 
-		return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Restart the Docker service", Doc: docURL}
+		return registry.State{Reason: reason, Error: err, Installed: true, Healthy: false, Fix: "Restart the Docker service", Doc: docURL}
 	}
 
 	klog.Infof("docker version: %s", o)
 	if strings.Contains(string(o), "windows-") {
-		return registry.State{Error: oci.ErrWindowsContainers, Installed: true, Healthy: false, Fix: "Change container type to \"linux\" in Docker Desktop settings", Doc: docURL + "#verify-docker-container-type-is-linux"}
+		return registry.State{Reason: "PROVIDER_DOCKER_WINDOWS_CONTAINERS", Error: oci.ErrWindowsContainers, Installed: true, Healthy: false, Fix: "Change container type to \"linux\" in Docker Desktop settings", Doc: docURL + "#verify-docker-container-type-is-linux"}
 	}
 
 	si, err := oci.CachedDaemonInfo("docker")
 	if err != nil {
 		// No known fix because we haven't yet seen a failure here
-		return registry.State{Error: errors.Wrap(err, "docker info"), Installed: true, Healthy: false, Doc: docURL}
+		return registry.State{Reason: "PROVIDER_DOCKER_INFO_FAILED", Error: errors.Wrap(err, "docker info"), Installed: true, Healthy: false, Doc: docURL}
 	}
 
 	for _, serr := range si.Errors {
-		return suggestFix(serr, fmt.Errorf("docker info error: %s", serr))
+		return suggestFix("info", -1, serr, fmt.Errorf("docker info error: %s", serr))
 	}
 
 	return checkNeedsImprovement()
@@ -157,23 +158,43 @@ func checkOverlayMod() registry.State {
 }
 
 // suggestFix matches a stderr with possible fix for the docker driver
-func suggestFix(stderr string, err error) registry.State {
+func suggestFix(src string, exitcode int, stderr string, err error) registry.State {
 	if strings.Contains(stderr, "permission denied") && runtime.GOOS == "linux" {
-		return registry.State{Error: err, Installed: true, Running: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"}
+		return registry.State{Reason: "PROVIDER_DOCKER_NEWGRP", Error: err, Installed: true, Running: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"}
 	}
 
 	if strings.Contains(stderr, "/pipe/docker_engine: The system cannot find the file specified.") && runtime.GOOS == "windows" {
-		return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"}
+		return registry.State{Reason: "PROVIDER_DOCKER_PIPE_NOT_FOUND", Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"}
 	}
 
-	if dockerNotRunning(stderr) {
-		return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service", Doc: docURL}
+	reason := dockerNotRunning(stderr)
+	if reason != "" {
+		return registry.State{Reason: reason, Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service", Doc: docURL}
 	}
 
 	// We don't have good advice, but at least we can provide a good error message
-	return registry.State{Error: err, Installed: true, Running: true, Healthy: false, Doc: docURL}
+	reason = strings.ToUpper(fmt.Sprintf("PROVIDER_DOCKER_%s_ERROR", src))
+	if exitcode > 0 {
+		reason = strings.ToUpper(fmt.Sprintf("PROVIDER_DOCKER_%s_EXIT_%d", src, exitcode))
+	}
+	return registry.State{Reason: reason, Error: err, Installed: true, Running: true, Healthy: false, Doc: docURL}
 }
 
-func dockerNotRunning(s string) bool {
-	return strings.Contains(s, "Cannot connect") || strings.Contains(s, "refused") || strings.Contains(s, "Is the docker daemon running") || strings.Contains(s, "docker daemon is not running")
+// Return a reason code for Docker not running
+func dockerNotRunning(s string) string {
+	// These codes are explicitly in order of the most likely to be helpful to a user
+
+	if strings.Contains(s, "Is the docker daemon running") || strings.Contains(s, "docker daemon is not running") {
+		return "PROVIDER_DOCKER_NOT_RUNNING"
+	}
+
+	if strings.Contains(s, "Cannot connect") {
+		return "PROVIDER_DOCKER_CANNOT_CONNECT"
+	}
+
+	if strings.Contains(s, "refused") {
+		return "PROVIDER_DOCKER_REFUSED"
+	}
+
+	return ""
 }
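The new `suggestFix` derives a fallback reason from the failing docker subcommand and its exit code. A self-contained sketch of that naming scheme (the helper name here is illustrative, not the driver's actual code):

package main

import (
    "fmt"
    "strings"
)

// fallbackReason mirrors the naming scheme in the hunk above:
// PROVIDER_DOCKER_<SRC>_ERROR, or PROVIDER_DOCKER_<SRC>_EXIT_<N> when an
// exit code is available.
func fallbackReason(src string, exitcode int) string {
    reason := strings.ToUpper(fmt.Sprintf("PROVIDER_DOCKER_%s_ERROR", src))
    if exitcode > 0 {
        reason = strings.ToUpper(fmt.Sprintf("PROVIDER_DOCKER_%s_EXIT_%d", src, exitcode))
    }
    return reason
}

func main() {
    fmt.Println(fallbackReason("version", 1)) // PROVIDER_DOCKER_VERSION_EXIT_1
    fmt.Println(fallbackReason("info", -1))   // PROVIDER_DOCKER_INFO_ERROR
}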
@@ -79,8 +79,10 @@ type State struct {
 	Running          bool // it at least appears to be running
 	NeedsImprovement bool // healthy but could be improved
 	Error            error
-	Fix              string
-	Doc              string
+
+	Reason string // A reason ID, propagated to reason.Kind.ID
+	Fix    string
+	Doc    string
 }
 
 // DriverDef defines how to initialize and load a machine driver
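With the new `Reason` field, a driver can return a machine-readable ID alongside the human-readable fix. A minimal illustration of constructing such a state (the `State` type is reduced to the fields shown in this hunk, and the Doc URL is illustrative):

package main

import (
    "errors"
    "fmt"
)

// State is reduced to the fields relevant to this hunk.
type State struct {
    Installed bool
    Running   bool
    Healthy   bool
    Error     error

    Reason string // A reason ID, propagated to reason.Kind.ID
    Fix    string
    Doc    string
}

func main() {
    st := State{
        Reason:    "PROVIDER_DOCKER_NOT_RUNNING",
        Error:     errors.New("Cannot connect to the Docker daemon"),
        Installed: true,
        Running:   false,
        Healthy:   false,
        Fix:       "Start the Docker service",
        Doc:       "https://minikube.sigs.k8s.io/docs/drivers/docker/",
    }
    fmt.Printf("%+v\n", st)
}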
@@ -66,7 +66,7 @@ minikube start [flags]
       --interactive                   Allow user prompts for more information (default true)
       --iso-url strings               Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.17.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.17.0/minikube-v1.17.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.17.0.iso])
       --keep-context                  This will keep the existing kubectl context and will create a minikube context.
-      --kubernetes-version string     The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.20.0, 'latest' for v1.20.0). Defaults to 'stable'.
+      --kubernetes-version string     The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.20.2, 'latest' for v1.20.3-rc.0). Defaults to 'stable'.
       --kvm-gpu                       Enable experimental NVIDIA GPU support in minikube
       --kvm-hidden                    Hide the hypervisor signature from the guest in minikube (kvm2 driver only)
       --kvm-network string            The KVM network name. (kvm2 driver only) (default "default")
@@ -223,7 +223,6 @@ func validateAddonAfterStop(ctx context.Context, t *testing.T, profile string, t
 }
 
 func validateKubernetesImages(ctx context.Context, t *testing.T, profile string, tcName string, tcVersion string, startArgs []string) {
-	defer PostMortemLogs(t, profile)
 	if !NoneDriver() {
 		testPulledImages(ctx, t, profile, tcVersion)
 	}
@@ -282,9 +281,17 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
 	jv := map[string][]struct {
 		Tags []string `json:"repoTags"`
 	}{}
-	err = json.Unmarshal(rr.Stdout.Bytes(), &jv)
+
+	// crictl includes warnings in STDOUT before printing JSON output
+	// this step trims the warnings before JSON output begins
+	// See #10175 for details on fixing these warnings
+	stdout := rr.Stdout.String()
+	index := strings.Index(stdout, "{")
+	stdout = stdout[index:]
+
+	err = json.Unmarshal([]byte(stdout), &jv)
 	if err != nil {
-		t.Errorf("failed to decode images json %v. output: %s", err, rr.Output())
+		t.Errorf("failed to decode images json %v. output:\n%s", err, stdout)
 	}
 	found := map[string]bool{}
 	for _, img := range jv["images"] {
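The trimming above strips any non-JSON prefix (crictl warnings) before unmarshalling. A standalone sketch of the same approach on canned output (the warning text and image list are made up for illustration):

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

func main() {
    // Simulated crictl output: a warning line precedes the JSON document.
    raw := "WARN[0000] image connect using default endpoints\n" +
        `{"images":[{"repoTags":["k8s.gcr.io/pause:3.2"]}]}`

    // Trim everything before the first '{' so only JSON remains.
    index := strings.Index(raw, "{")
    if index < 0 {
        fmt.Println("no JSON found in output")
        return
    }
    stdout := raw[index:]

    jv := map[string][]struct {
        Tags []string `json:"repoTags"`
    }{}
    if err := json.Unmarshal([]byte(stdout), &jv); err != nil {
        fmt.Printf("failed to decode images json %v. output:\n%s\n", err, stdout)
        return
    }
    fmt.Println(jv["images"][0].Tags) // [k8s.gcr.io/pause:3.2]
}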
@@ -63,7 +63,7 @@ func legacyStartArgs() []string {
 	return strings.Split(strings.Replace(*startArgs, "--driver", "--vm-driver", -1), " ")
 }
 
-// TestRunningBinaryUpgrade does an upgrade test on a running cluster
+// TestRunningBinaryUpgrade upgrades a running legacy cluster to head minikube
 func TestRunningBinaryUpgrade(t *testing.T) {
 	// not supported till v1.10, and passing new images to old releases isn't supported anyways
 	if TestingKicBaseImage() {
@@ -92,7 +92,26 @@ func TestRunningBinaryUpgrade(t *testing.T) {
 	args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
 	rr := &RunResult{}
 	r := func() error {
-		rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
+		c := exec.CommandContext(ctx, tf.Name(), args...)
+		legacyEnv := []string{}
+		// replace the global KUBECONFIG with a fresh kubeconfig
+		// because for minikube<1.17.0 it can not read the new kubeconfigs that have extra "Extenions" block
+		// see: https://github.com/kubernetes/minikube/issues/10210
+		for _, e := range os.Environ() {
+			if !strings.Contains(e, "KUBECONFIG") { // get all global envs except the Kubeconfig which is used by new versions of minikubes
+				legacyEnv = append(legacyEnv, e)
+			}
+		}
+		// using a fresh kubeconfig for this test
+		legacyKubeConfig, err := ioutil.TempFile("", "legacy_kubeconfig")
+		if err != nil {
+			t.Fatalf("failed to create temp file for legacy kubeconfig %v", err)
+		}
+
+		defer os.Remove(legacyKubeConfig.Name()) // clean up
+		legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))
+		c.Env = legacyEnv
+		rr, err = Run(t, c)
 		return err
 	}
 
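Both upgrade tests use the same trick: copy the process environment minus KUBECONFIG and point the legacy binary at a throwaway kubeconfig. A minimal sketch of building such an environment for an arbitrary command (the command run here is illustrative, and the sketch uses a HasPrefix match rather than the Contains check in the diff):

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "strings"
)

func main() {
    // Keep every variable except KUBECONFIG so the legacy binary
    // cannot see kubeconfigs written by newer minikube versions.
    legacyEnv := []string{}
    for _, e := range os.Environ() {
        if !strings.HasPrefix(e, "KUBECONFIG=") {
            legacyEnv = append(legacyEnv, e)
        }
    }

    // Point the legacy binary at a fresh, empty kubeconfig instead.
    legacyKubeConfig, err := ioutil.TempFile("", "legacy_kubeconfig")
    if err != nil {
        panic(err)
    }
    defer os.Remove(legacyKubeConfig.Name())
    legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))

    c := exec.Command("kubectl", "config", "view") // illustrative command
    c.Env = legacyEnv
    out, err := c.CombinedOutput()
    fmt.Printf("err=%v\n%s", err, out)
}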
@@ -108,7 +127,7 @@ func TestRunningBinaryUpgrade(t *testing.T) {
 	}
 }
 
-// TestStoppedBinaryUpgrade does an upgrade test on a stopped cluster
+// TestStoppedBinaryUpgrade starts a legacy minikube and stops it and then upgrades to head minikube
 func TestStoppedBinaryUpgrade(t *testing.T) {
 	// not supported till v1.10, and passing new images to old releases isn't supported anyways
 	if TestingKicBaseImage() {
@@ -139,7 +158,26 @@ func TestStoppedBinaryUpgrade(t *testing.T) {
 	args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
 	rr := &RunResult{}
 	r := func() error {
-		rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
+		c := exec.CommandContext(ctx, tf.Name(), args...)
+		legacyEnv := []string{}
+		// replace the global KUBECONFIG with a fresh kubeconfig
+		// because for minikube<1.17.0 it can not read the new kubeconfigs that have extra "Extenions" block
+		// see: https://github.com/kubernetes/minikube/issues/10210
+		for _, e := range os.Environ() {
+			if !strings.Contains(e, "KUBECONFIG") { // get all global envs except the Kubeconfig which is used by new versions of minikubes
+				legacyEnv = append(legacyEnv, e)
+			}
+		}
+		// using a fresh kubeconfig for this test
+		legacyKubeConfig, err := ioutil.TempFile("", "legacy_kubeconfig")
+		if err != nil {
+			t.Fatalf("failed to create temp file for legacy kubeconfig %v", err)
+		}
+
+		defer os.Remove(legacyKubeConfig.Name()) // clean up
+		legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))
+		c.Env = legacyEnv
+		rr, err = Run(t, c)
 		return err
 	}
 