Merge branch 'master' into wrong-url

pull/7360/head
Thomas Stromberg 2020-04-02 08:32:54 -07:00
commit a71edc3920
110 changed files with 948 additions and 592 deletions

View File

@ -2,13 +2,15 @@
name: English
about: Report an issue
---
**Steps to reproduce the issue:** <!-- include the "minikube start" command! -->
<!--- Please include the "minikube start" command you used in your reproduction steps --->
**Steps to reproduce the issue:**
1.
2.
3.
**Full output of failed command:** <!-- use the "--alsologtostderr" flag for more logs -->
<!--- TIP: Add the "--alsologtostderr" flag to the command-line for more logs --->
**Full output of failed command:**

View File

@ -2,6 +2,9 @@
[![BuildStatus Widget]][BuildStatus Result]
[![GoReport Widget]][GoReport Status]
[![Github All Releases](https://img.shields.io/github/downloads/kubernetes/minikube/total.svg)](https://github.com/kubernetes/minikube/releases/latest)
[![Latest Release](https://img.shields.io/github/v/release/kubernetes/minikube?include_prereleases)](https://github.com/kubernetes/minikube/releases/latest)
[BuildStatus Result]: https://travis-ci.org/kubernetes/minikube
[BuildStatus Widget]: https://travis-ci.org/kubernetes/minikube.svg?branch=master

View File

@ -63,10 +63,8 @@ var dashboardCmd = &cobra.Command{
}
}
kubectl, err := exec.LookPath("kubectl")
if err != nil {
exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
}
kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion
var err error
// Check dashboard status before enabling it
dashboardAddon := assets.Addons["dashboard"]
@ -90,7 +88,7 @@ var dashboardCmd = &cobra.Command{
}
out.ErrT(out.Launch, "Launching proxy ...")
p, hostPort, err := kubectlProxy(kubectl, cname)
p, hostPort, err := kubectlProxy(kubectlVersion, cname)
if err != nil {
exit.WithError("kubectl proxy", err)
}
@ -124,10 +122,17 @@ var dashboardCmd = &cobra.Command{
}
// kubectlProxy runs "kubectl proxy", returning host:port
func kubectlProxy(path string, contextName string) (*exec.Cmd, string, error) {
func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) {
// port=0 picks a random system port
cmd := exec.Command(path, "--context", contextName, "proxy", "--port=0")
kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"}
var cmd *exec.Cmd
if kubectl, err := exec.LookPath("kubectl"); err == nil {
cmd = exec.Command(kubectl, kubectlArgs...)
} else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil {
return nil, "", err
}
stdoutPipe, err := cmd.StdoutPipe()
if err != nil {

View File

@ -43,17 +43,12 @@ minikube kubectl -- get pods --namespace kube-system`,
co := mustload.Healthy(ClusterFlagValue())
version := co.Config.KubernetesConfig.KubernetesVersion
if version == "" {
version = constants.DefaultKubernetesVersion
}
path, err := node.CacheKubectlBinary(version)
c, err := KubectlCommand(version, args...)
if err != nil {
out.ErrLn("Error caching kubectl: %v", err)
}
glog.Infof("Running %s %v", path, args)
c := exec.Command(path, args...)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
@ -70,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`,
}
},
}
// KubectlCommand returns an exec.Cmd that invokes a cached kubectl binary
// whose version matches the requested cluster version. An empty version
// falls back to constants.DefaultKubernetesVersion.
func KubectlCommand(version string, args ...string) (*exec.Cmd, error) {
	v := version
	if v == "" {
		v = constants.DefaultKubernetesVersion
	}
	// Download (or reuse) the kubectl binary matching the requested version.
	path, cacheErr := node.CacheKubectlBinary(v)
	if cacheErr != nil {
		return nil, cacheErr
	}
	return exec.Command(path, args...), nil
}

View File

@ -18,10 +18,8 @@ package cmd
import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
@ -56,7 +54,7 @@ var nodeAddCmd = &cobra.Command{
}
if err := node.Add(cc, n); err != nil {
exit.WithError("Error adding node to cluster", err)
maybeDeleteAndRetry(*cc, n, nil, err)
}
out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
@ -64,13 +62,10 @@ var nodeAddCmd = &cobra.Command{
}
func init() {
// TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import
nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
//We should figure out which of these flags to actually import
startCmd.Flags().Visit(
func(f *pflag.Flag) {
nodeAddCmd.Flags().AddFlag(f)
},
)
nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
nodeCmd.AddCommand(nodeAddCmd)
}

View File

@ -49,12 +49,15 @@ var nodeStartCmd = &cobra.Command{
exit.WithError("retrieving node", err)
}
// Start it up baby
node.Start(*cc, *n, nil, false)
_, err = node.Start(*cc, *n, nil, false)
if err != nil {
maybeDeleteAndRetry(*cc, *n, nil, err)
}
},
}
func init() {
	// --name selects which node of the profile to start.
	nodeStartCmd.Flags().String("name", "", "The name of the node to start")
	// deleteOnFailure mirrors the same flag on "minikube start".
	nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
	nodeCmd.AddCommand(nodeStartCmd)
}

View File

@ -124,6 +124,7 @@ const (
natNicType = "nat-nic-type"
nodes = "nodes"
preload = "preload"
deleteOnFailure = "delete-on-failure"
)
var (
@ -177,6 +178,7 @@ func initMinikubeFlags() {
startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
}
// initKubernetesFlags inits the commandline flags for kubernetes related options
@ -353,7 +355,10 @@ func runStart(cmd *cobra.Command, args []string) {
}
}
kubeconfig := node.Start(cc, n, existingAddons, true)
kubeconfig, err := node.Start(cc, n, existingAddons, true)
if err != nil {
kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
}
numNodes := viper.GetInt(nodes)
if numNodes == 1 && existing != nil {
@ -371,6 +376,7 @@ func runStart(cmd *cobra.Command, args []string) {
ControlPlane: false,
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
}
out.Ln("") // extra newline for clarity on the command line
err := node.Add(&cc, n)
if err != nil {
exit.WithError("adding node", err)
@ -432,22 +438,12 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
return nil
}
j, err := exec.Command(path, "version", "--client", "--output=json").Output()
gitVersion, err := kubectlVersion(path)
if err != nil {
return errors.Wrap(err, "exec")
return err
}
cv := struct {
ClientVersion struct {
GitVersion string `json:"gitVersion"`
} `json:"clientVersion"`
}{}
err = json.Unmarshal(j, &cv)
if err != nil {
return errors.Wrap(err, "unmarshal")
}
client, err := semver.Make(strings.TrimPrefix(cv.ClientVersion.GitVersion, version.VersionPrefix))
client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
if err != nil {
return errors.Wrap(err, "client semver")
}
@ -466,6 +462,63 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
return nil
}
// maybeDeleteAndRetry handles a failed node start. If the delete-on-failure
// flag is set, it deletes the cluster profile and restarts every node once;
// on success it returns the kubeconfig settings of the control-plane node.
// If the flag is unset, or the retry fails, it terminates the process via
// exit.WithError and never returns normally.
func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
	if !viper.GetBool(deleteOnFailure) {
		// Don't delete the cluster unless they ask
		exit.WithError("startup failed", originalErr)
		return nil
	}

	out.T(out.Warning, "Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
	// Start failed, delete the cluster and try again
	profile, err := config.LoadProfile(cc.Name)
	if err != nil {
		out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name})
	}
	if err := deleteProfile(profile); err != nil {
		out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name})
	}

	// kcs: renamed from "kubeconfig" to avoid shadowing the imported
	// kubeconfig package inside this function.
	var kcs *kubeconfig.Settings
	for _, v := range cc.Nodes {
		k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
		if err != nil {
			// Ok we failed again, let's bail
			exit.WithError("Start failed after cluster deletion", err)
		}
		if v.ControlPlane {
			kcs = k
		}
	}
	return kcs
}
// kubectlVersion reports the client GitVersion of the kubectl binary at
// path. It asks for JSON output first and, when that fails, falls back to
// the --short form used by old clients.
func kubectlVersion(path string) (string, error) {
	data, jsonErr := exec.Command(path, "version", "--client", "--output=json").Output()
	if jsonErr != nil {
		// really old kubernetes clients did not have the --output parameter
		raw, shortErr := exec.Command(path, "version", "--client", "--short").Output()
		if shortErr != nil {
			return "", errors.Wrap(shortErr, "exec")
		}
		trimmed := strings.TrimSpace(string(raw))
		return strings.Replace(trimmed, "Client Version: ", "", 1), nil
	}

	var cv struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}
	if err := json.Unmarshal(data, &cv); err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}
	return cv.ClientVersion.GitVersion, nil
}
func selectDriver(existing *config.ClusterConfig) registry.DriverState {
// Technically unrelated, but important to perform before detection
driver.SetLibvirtURI(viper.GetString(kvmQemuURI))

View File

@ -88,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
}
}
if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil {
if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil {
exit.WithError("Unable to stop VM", err)
}

View File

@ -17,20 +17,56 @@ limitations under the License.
package cmd
import (
"encoding/json"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/version"
)
var (
	// versionOutput selects the output format: "" (plain text), "json", or "yaml".
	versionOutput string
	// shortVersion suppresses everything but the version number itself.
	shortVersion bool
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version of minikube",
Long: `Print the version of minikube.`,
Run: func(command *cobra.Command, args []string) {
out.Ln("minikube version: %v", version.GetVersion())
minikubeVersion := version.GetVersion()
gitCommitID := version.GetGitCommitID()
if gitCommitID != "" {
out.Ln("commit: %v", gitCommitID)
data := map[string]string{
"minikubeVersion": minikubeVersion,
"commit": gitCommitID,
}
switch versionOutput {
case "":
out.Ln("minikube version: %v", minikubeVersion)
if !shortVersion && gitCommitID != "" {
out.Ln("commit: %v", gitCommitID)
}
case "json":
json, err := json.Marshal(data)
if err != nil {
exit.WithError("version json failure", err)
}
out.Ln(string(json))
case "yaml":
yaml, err := yaml.Marshal(data)
if err != nil {
exit.WithError("version yaml failure", err)
}
out.Ln(string(yaml))
default:
exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'")
}
},
}
func init() {
	// -o/--output switches between plain, json, and yaml formatting.
	versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
	versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
}

View File

@ -4,7 +4,7 @@ publish = "site/public/"
command = "pwd && cd themes/docsy && git submodule update -f --init && cd ../.. && hugo"
[build.environment]
HUGO_VERSION = "0.59.0"
HUGO_VERSION = "0.68.3"
[context.production.environment]
HUGO_ENV = "production"

View File

@ -64,6 +64,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -73,6 +73,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -68,4 +68,17 @@ networking:
dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}
podSubnet: "{{.PodSubnet }}"
serviceSubnet: {{.ServiceCIDR}}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: {{.AdvertiseAddress}}:10249
`))

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -44,6 +44,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -37,6 +37,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -41,6 +41,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -44,6 +44,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -37,6 +37,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -41,6 +41,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -46,3 +46,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: 1.1.1.1
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -37,3 +37,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -43,3 +43,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -46,3 +46,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: 1.1.1.1
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -37,3 +37,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -43,3 +43,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -46,3 +46,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -36,3 +36,16 @@ networking:
dnsDomain: 1.1.1.1
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -37,3 +37,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -43,3 +43,16 @@ networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

View File

@ -61,16 +61,16 @@ func WithCodeT(code int, format string, a ...out.V) {
func WithError(msg string, err error) {
p := problem.FromError(err, runtime.GOOS)
if p != nil {
WithProblem(msg, p)
WithProblem(msg, err, p)
}
displayError(msg, err)
os.Exit(Software)
}
// WithProblem outputs info related to a known problem and exits.
func WithProblem(msg string, p *problem.Problem) {
func WithProblem(msg string, err error, p *problem.Problem) {
out.ErrT(out.Empty, "")
out.FatalT(msg)
out.ErrT(out.FailureType, "[{{.id}}] {{.msg}} {{.error}}", out.V{"msg": msg, "id": p.ID, "error": p.Err})
p.Display()
if p.ShowIssueLink {
out.ErrT(out.Empty, "")

View File

@ -38,13 +38,14 @@ func ConfigFile() string {
// MiniPath returns the path to the user's minikube dir
func MiniPath() string {
if os.Getenv(MinikubeHome) == "" {
minikubeHomeEnv := os.Getenv(MinikubeHome)
if minikubeHomeEnv == "" {
return filepath.Join(homedir.HomeDir(), ".minikube")
}
if filepath.Base(os.Getenv(MinikubeHome)) == ".minikube" {
return os.Getenv(MinikubeHome)
if filepath.Base(minikubeHomeEnv) == ".minikube" {
return minikubeHomeEnv
}
return filepath.Join(os.Getenv(MinikubeHome), ".minikube")
return filepath.Join(minikubeHomeEnv, ".minikube")
}
// MakeMiniPath is a utility to calculate a relative path to our directory.

View File

@ -17,10 +17,15 @@ limitations under the License.
package localpath
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"k8s.io/client-go/util/homedir"
)
func TestReplaceWinDriveLetterToVolumeName(t *testing.T) {
@ -61,3 +66,95 @@ func TestHasWindowsDriveLetter(t *testing.T) {
}
}
}
func TestMiniPath(t *testing.T) {
var testCases = []struct {
env, basePath string
}{
{"/tmp/.minikube", "/tmp/"},
{"/tmp/", "/tmp"},
{"", homedir.HomeDir()},
}
originalEnv := os.Getenv(MinikubeHome)
defer func() { // revert to pre-test env var
err := os.Setenv(MinikubeHome, originalEnv)
if err != nil {
t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv)
}
}()
for _, tc := range testCases {
t.Run(tc.env, func(t *testing.T) {
expectedPath := filepath.Join(tc.basePath, ".minikube")
os.Setenv(MinikubeHome, tc.env)
path := MiniPath()
if path != expectedPath {
t.Errorf("MiniPath expected to return '%s', but got '%s'", expectedPath, path)
}
})
}
}
func TestMachinePath(t *testing.T) {
var testCases = []struct {
miniHome []string
contains string
}{
{[]string{"tmp", "foo", "bar", "baz"}, "tmp"},
{[]string{"tmp"}, "tmp"},
{[]string{}, MiniPath()},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("%s", tc.miniHome), func(t *testing.T) {
machinePath := MachinePath("foo", tc.miniHome...)
if !strings.Contains(machinePath, tc.contains) {
t.Errorf("Function MachinePath returned (%v) which doesn't contain expected (%v)", machinePath, tc.contains)
}
})
}
}
type propertyFnWithArg func(string) string
func TestPropertyWithNameArg(t *testing.T) {
var testCases = []struct {
propertyFunc propertyFnWithArg
name string
}{
{Profile, "Profile"},
{ClientCert, "ClientCert"},
{ClientKey, "ClientKey"},
}
miniPath := MiniPath()
mockedName := "foo"
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) {
t.Errorf("Property %s(%v) doesn't contain miniPath %v", tc.name, tc.propertyFunc, miniPath)
}
if !strings.Contains(tc.propertyFunc(mockedName), mockedName) {
t.Errorf("Property %s(%v) doesn't contain passed name %v", tc.name, tc.propertyFunc, mockedName)
}
})
}
}
type propertyFnWithoutArg func() string
func TestPropertyWithoutNameArg(t *testing.T) {
var testCases = []struct {
propertyFunc propertyFnWithoutArg
name string
}{
{ConfigFile, "ConfigFile"},
{CACert, "CACert"},
}
miniPath := MiniPath()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if !strings.Contains(tc.propertyFunc(), MiniPath()) {
t.Errorf("Property %s(%v) doesn't contain expected miniPath %v", tc.name, tc.propertyFunc, miniPath)
}
})
}
}

View File

@ -21,6 +21,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
@ -158,36 +159,51 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager
// CacheAndLoadImages caches and loads images to all profiles
func CacheAndLoadImages(images []string) error {
// This is the most important thing
if err := image.SaveToDir(images, constants.ImageCacheDir); err != nil {
return err
return errors.Wrap(err, "save to dir")
}
api, err := NewAPIClient()
if err != nil {
return err
return errors.Wrap(err, "api")
}
defer api.Close()
profiles, _, err := config.ListProfiles() // need to load image to all profiles
if err != nil {
return errors.Wrap(err, "list profiles")
}
succeeded := []string{}
failed := []string{}
for _, p := range profiles { // loading images to all running profiles
pName := p.Name // capture the loop variable
c, err := config.Load(pName)
if err != nil {
return err
// Non-fatal because it may race with profile deletion
glog.Errorf("Failed to load profile %q: %v", pName, err)
failed = append(failed, pName)
continue
}
for _, n := range c.Nodes {
m := driver.MachineName(*c, n)
status, err := Status(api, m)
if err != nil {
glog.Warningf("skipping loading cache for profile %s", pName)
glog.Errorf("error getting status for %s: %v", pName, err)
continue // try next machine
failed = append(failed, pName)
continue
}
if status == state.Running.String() { // the not running hosts will load on next start
h, err := api.Load(m)
if err != nil {
return err
glog.Errorf("Failed to load machine %q: %v", m, err)
failed = append(failed, pName)
continue
}
cr, err := CommandRunner(h)
if err != nil {
@ -195,12 +211,18 @@ func CacheAndLoadImages(images []string) error {
}
err = LoadImages(c, cr, images, constants.ImageCacheDir)
if err != nil {
failed = append(failed, pName)
glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
}
succeeded = append(succeeded, pName)
}
}
}
return err
glog.Infof("succeeded pushing to: %s", strings.Join(succeeded, " "))
glog.Infof("failed pushing to: %s", strings.Join(failed, " "))
// Live pushes are not considered a failure
return nil
}
// transferAndLoadImage transfers and loads a single image from the cache

View File

@ -39,9 +39,8 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
return errors.Wrap(err, "save node")
}
// TODO: Start should return an error rather than calling exit!
Start(*cc, n, nil, false)
return nil
_, err := Start(*cc, n, nil, false)
return err
}
// Delete stops and deletes the given node from the given cluster

View File

@ -63,7 +63,14 @@ const (
)
// Start spins up a guest and starts the kubernetes node.
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings {
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
cp := ""
if apiServer {
cp = "control plane "
}
out.T(out.ThumbsUp, "Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}", out.V{"controlPlane": cp, "name": n.Name, "cluster": cc.Name})
var kicGroup errgroup.Group
if driver.IsKIC(cc.Driver) {
beginDownloadKicArtifacts(&kicGroup)
@ -91,7 +98,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
if err != nil {
exit.WithError("Failed to parse kubernetes version", err)
return nil, errors.Wrap(err, "Failed to parse kubernetes version")
}
// configure the runtime (docker, containerd, crio)
@ -99,12 +106,12 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
showVersionInfo(n.KubernetesVersion, cr)
var bs bootstrapper.Bootstrapper
var kubeconfig *kubeconfig.Settings
var kcs *kubeconfig.Settings
if apiServer {
// Must be written before bootstrap, otherwise health checks may flake due to stale IP
kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name)
kcs = setupKubeconfig(host, &cc, &n, cc.Name)
if err != nil {
exit.WithError("Failed to setup kubeconfig", err)
return nil, errors.Wrap(err, "Failed to setup kubeconfig")
}
// setup kubeadm (must come after setupKubeconfig)
@ -113,16 +120,20 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
if err != nil {
exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner))
}
// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
if err := kubeconfig.Update(kcs); err != nil {
return nil, errors.Wrap(err, "Failed to update kubeconfig file.")
}
} else {
bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
return nil, errors.Wrap(err, "Failed to get bootstrapper")
}
if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
exit.WithError("setting up certs", err)
return nil, errors.Wrap(err, "setting up certs")
}
}
configureMounts()
@ -146,35 +157,34 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
// Skip pre-existing, because we already waited for health
if viper.GetBool(waitUntilHealthy) && !preExists {
if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
exit.WithError("Wait failed", err)
return nil, errors.Wrap(err, "Wait failed")
}
}
} else {
if err := bs.UpdateNode(cc, n, cr); err != nil {
exit.WithError("Updating node", err)
return nil, errors.Wrap(err, "Updating node")
}
cp, err := config.PrimaryControlPlane(&cc)
if err != nil {
exit.WithError("Getting primary control plane", err)
return nil, errors.Wrap(err, "Getting primary control plane")
}
cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
if err != nil {
exit.WithError("Getting bootstrapper", err)
return nil, errors.Wrap(err, "Getting bootstrapper")
}
joinCmd, err := cpBs.GenerateToken(cc)
if err != nil {
exit.WithError("generating join token", err)
return nil, errors.Wrap(err, "generating join token")
}
if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
exit.WithError("joining cluster", err)
return nil, errors.Wrap(err, "joining cluster")
}
}
return kubeconfig
return kcs, nil
}
// ConfigureRuntimes does what needs to happen to get a runtime going.
@ -237,7 +247,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node)
return bs
}
func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) *kubeconfig.Settings {
addr, err := apiServerURL(*h, *cc, *n)
if err != nil {
exit.WithError("Failed to get API Server URL", err)
@ -257,10 +267,7 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
}
kcs.SetPath(kubeconfig.PathFromEnv())
if err := kubeconfig.Update(kcs); err != nil {
return kcs, err
}
return kcs, nil
return kcs
}
func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) {
@ -325,10 +332,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
return host, exists
}
out.T(out.FailureType, "StartHost failed again: {{.error}}", out.V{"error": err})
out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`,
out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")})
// Don't use host.Driver to avoid nil pointer deref
drv := cc.Driver
exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err)
return host, exists
@ -363,8 +367,8 @@ func validateNetwork(h *host.Host, r command.Runner) string {
trySSH(h, ip)
}
tryLookup(r)
tryRegistry(r)
// Non-blocking
go tryRegistry(r, h.Driver.DriverName())
return ip
}
@ -405,21 +409,12 @@ func trySSH(h *host.Host, ip string) {
}
}
func tryLookup(r command.Runner) {
// DNS check
if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
// will try with without query type for ISOs with different busybox versions.
if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
glog.Warningf("nslookup failed: %v", err)
out.WarningT("Node may be unable to resolve external DNS records")
}
}
}
func tryRegistry(r command.Runner) {
// Try an HTTPS connection to the image repository
// tryRegistry tries to connect to the image repository
func tryRegistry(r command.Runner, driverName string) {
// 2 second timeout. For best results, call tryRegistry in a non-blocking manner.
opts := []string{"-sS", "-m", "2"}
proxy := os.Getenv("HTTPS_PROXY")
opts := []string{"-sS"}
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
opts = append([]string{"-x", proxy}, opts...)
}
@ -432,7 +427,8 @@ func tryRegistry(r command.Runner) {
opts = append(opts, fmt.Sprintf("https://%s/", repo))
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": repo, "type": driver.MachineType(driverName)})
out.T(out.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/")
}
}

View File

@ -57,7 +57,6 @@ type match struct {
// Display problem metadata to the console
func (p *Problem) Display() {
out.ErrT(out.FailureType, "Error: [{{.id}}] {{.error}}", out.V{"id": p.ID, "error": p.Err})
out.ErrT(out.Tip, "Suggestion: {{.advice}}", out.V{"advice": translate.T(p.Advice)})
if p.URL != "" {
out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": p.URL})
@ -65,6 +64,12 @@ func (p *Problem) Display() {
if len(p.Issues) == 0 {
return
}
if len(p.Issues) == 1 {
out.ErrT(out.Issues, "Related issue: {{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, p.Issues[0])})
return
}
out.ErrT(out.Issues, "Related issues:")
issues := p.Issues
if len(issues) > 3 {

View File

@ -44,7 +44,6 @@ func TestDisplay(t *testing.T) {
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test")},
description: "url, id and err",
expected: `
* Error: [example] test
* Suggestion:
* Documentation: example.com
`,
@ -53,7 +52,6 @@ func TestDisplay(t *testing.T) {
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}, Advice: "you need a hug"},
description: "with 2 issues and suggestion",
expected: `
* Error: [example] test
* Suggestion: you need a hug
* Documentation: example.com
* Related issues:
@ -65,7 +63,6 @@ func TestDisplay(t *testing.T) {
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}},
description: "with 2 issues",
expected: `
* Error: [example] test
* Suggestion:
* Documentation: example.com
* Related issues:
@ -78,7 +75,6 @@ func TestDisplay(t *testing.T) {
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1, 2, 3, 4, 5}},
description: "with 6 issues",
expected: `
* Error: [example] test
* Suggestion:
* Documentation: example.com
* Related issues:

View File

@ -1,189 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"reflect"
"strconv"
"strings"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
)
// findNestedElement uses reflection to find the element corresponding to the dot-separated string parameter.
func findNestedElement(s string, c interface{}) (reflect.Value, error) {
fields := strings.Split(s, ".")
// Take the ValueOf to get a pointer, so we can actually mutate the element.
e := reflect.Indirect(reflect.ValueOf(c).Elem())
for _, field := range fields {
e = reflect.Indirect(e.FieldByName(field))
// FieldByName returns the zero value if the field does not exist.
if e == (reflect.Value{}) {
return e, fmt.Errorf("unable to find field by name: %s", field)
}
// Start the loop again, on the next level.
}
return e, nil
}
// setElement sets the supplied element to the value in the supplied string. The string will be coerced to the correct type.
func setElement(e reflect.Value, v string) error {
switch e.Interface().(type) {
case int, int32, int64:
return convertInt(e, v)
case string:
return convertString(e, v)
case float32, float64:
return convertFloat(e, v)
case bool:
return convertBool(e, v)
case net.IP:
return convertIP(e, v)
case net.IPNet:
return convertCIDR(e, v)
case utilnet.PortRange:
return convertPortRange(e, v)
case time.Duration:
return convertDuration(e, v)
case []string:
vals := strings.Split(v, ",")
e.Set(reflect.ValueOf(vals))
case map[string]string:
return convertMap(e, v)
default:
// Last ditch attempt to convert anything based on its underlying kind.
// This covers any types that are aliased to a native type
return convertKind(e, v)
}
return nil
}
func convertMap(e reflect.Value, v string) error {
if e.IsNil() {
e.Set(reflect.MakeMap(e.Type()))
}
vals := strings.Split(v, ",")
for _, subitem := range vals {
subvals := strings.FieldsFunc(subitem, func(c rune) bool {
return c == '<' || c == '=' || c == '>'
})
if len(subvals) != 2 {
return fmt.Errorf("unparsable %s", v)
}
e.SetMapIndex(reflect.ValueOf(subvals[0]), reflect.ValueOf(subvals[1]))
}
return nil
}
func convertKind(e reflect.Value, v string) error {
switch e.Kind() {
case reflect.Int, reflect.Int32, reflect.Int64:
return convertInt(e, v)
case reflect.String:
return convertString(e, v)
case reflect.Float32, reflect.Float64:
return convertFloat(e, v)
case reflect.Bool:
return convertBool(e, v)
default:
return fmt.Errorf("unable to set type %T", e.Kind())
}
}
func convertInt(e reflect.Value, v string) error {
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("error converting input %s to an integer: %v", v, err)
}
e.SetInt(int64(i))
return nil
}
func convertString(e reflect.Value, v string) error {
e.SetString(v)
return nil
}
func convertFloat(e reflect.Value, v string) error {
f, err := strconv.ParseFloat(v, 64)
if err != nil {
return fmt.Errorf("error converting input %s to a float: %v", v, err)
}
e.SetFloat(f)
return nil
}
func convertBool(e reflect.Value, v string) error {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("error converting input %s to a bool: %v", v, err)
}
e.SetBool(b)
return nil
}
func convertIP(e reflect.Value, v string) error {
ip := net.ParseIP(v)
if ip == nil {
return fmt.Errorf("error converting input %s to an IP", v)
}
e.Set(reflect.ValueOf(ip))
return nil
}
func convertCIDR(e reflect.Value, v string) error {
_, cidr, err := net.ParseCIDR(v)
if err != nil {
return fmt.Errorf("error converting input %s to a CIDR: %v", v, err)
}
e.Set(reflect.ValueOf(*cidr))
return nil
}
func convertPortRange(e reflect.Value, v string) error {
pr, err := utilnet.ParsePortRange(v)
if err != nil {
return fmt.Errorf("error converting input %s to PortRange: %v", v, err)
}
e.Set(reflect.ValueOf(*pr))
return nil
}
func convertDuration(e reflect.Value, v string) error {
dur, err := time.ParseDuration(v)
if err != nil {
return fmt.Errorf("error converting input %s to Duration: %v", v, err)
}
e.Set(reflect.ValueOf(dur))
return nil
}
// FindAndSet sets the nested value.
func FindAndSet(path string, c interface{}, value string) error {
elem, err := findNestedElement(path, c)
if err != nil {
return err
}
return setElement(elem, value)
}

View File

@ -1,197 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"math"
"net"
"reflect"
"testing"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
)
type aliasedString string
type testConfig struct {
A string
B int
C float32
D subConfig1
E *subConfig2
}
type subConfig1 struct {
F string
G int
H float32
I subConfig3
}
type subConfig2 struct {
J string
K int
L float32
}
type subConfig3 struct {
M string
N int
O float32
P bool
Q net.IP
R utilnet.PortRange
S []string
T aliasedString
U net.IPNet
V time.Duration
}
func buildConfig() testConfig {
_, cidr, _ := net.ParseCIDR("12.34.56.78/16")
return testConfig{
A: "foo",
B: 1,
C: 1.1,
D: subConfig1{
F: "bar",
G: 2,
H: 2.2,
I: subConfig3{
M: "baz",
N: 3,
O: 3.3,
P: false,
Q: net.ParseIP("12.34.56.78"),
R: utilnet.PortRange{Base: 2, Size: 4},
U: *cidr,
V: 5 * time.Second,
},
},
E: &subConfig2{
J: "bat",
K: 4,
L: 4.4,
},
}
}
func TestFindNestedStrings(t *testing.T) {
a := buildConfig()
for _, tc := range []struct {
input string
output string
}{
{"A", "foo"},
{"D.F", "bar"},
{"D.I.M", "baz"},
{"E.J", "bat"},
} {
v, err := findNestedElement(tc.input, &a)
if err != nil {
t.Fatalf("Did not expect error. Got: %v", err)
}
if v.String() != tc.output {
t.Fatalf("Expected: %s, got %s", tc.output, v.String())
}
}
}
func TestFindNestedInts(t *testing.T) {
a := buildConfig()
for _, tc := range []struct {
input string
output int64
}{
{"B", 1},
{"D.G", 2},
{"D.I.N", 3},
{"E.K", 4},
} {
v, err := findNestedElement(tc.input, &a)
if err != nil {
t.Fatalf("Did not expect error. Got: %v", err)
}
if v.Int() != tc.output {
t.Fatalf("Expected: %d, got %d", tc.output, v.Int())
}
}
}
func checkFloats(f1, f2 float64) bool {
return math.Abs(f1-f2) < .00001
}
func TestFindNestedFloats(t *testing.T) {
a := buildConfig()
for _, tc := range []struct {
input string
output float64
}{
{"C", 1.1},
{"D.H", 2.2},
{"D.I.O", 3.3},
{"E.L", 4.4},
} {
v, err := findNestedElement(tc.input, &a)
if err != nil {
t.Fatalf("Did not expect error. Got: %v", err)
}
// Floating point comparison is tricky.
if !checkFloats(tc.output, v.Float()) {
t.Fatalf("Expected: %v, got %v", tc.output, v.Float())
}
}
}
func TestSetElement(t *testing.T) {
for _, tc := range []struct {
path string
newval string
checker func(testConfig) bool
}{
{"A", "newstring", func(t testConfig) bool { return t.A == "newstring" }},
{"B", "13", func(t testConfig) bool { return t.B == 13 }},
{"C", "3.14", func(t testConfig) bool { return checkFloats(float64(t.C), 3.14) }},
{"D.F", "fizzbuzz", func(t testConfig) bool { return t.D.F == "fizzbuzz" }},
{"D.G", "4", func(t testConfig) bool { return t.D.G == 4 }},
{"D.H", "7.3", func(t testConfig) bool { return checkFloats(float64(t.D.H), 7.3) }},
{"E.J", "otherstring", func(t testConfig) bool { return t.E.J == "otherstring" }},
{"E.K", "17", func(t testConfig) bool { return t.E.K == 17 }},
{"E.L", "1.234", func(t testConfig) bool { return checkFloats(float64(t.E.L), 1.234) }},
{"D.I.P", "true", func(t testConfig) bool { return t.D.I.P == true }},
{"D.I.P", "false", func(t testConfig) bool { return t.D.I.P == false }},
{"D.I.Q", "11.22.33.44", func(t testConfig) bool { return t.D.I.Q.Equal(net.ParseIP("11.22.33.44")) }},
{"D.I.R", "7-11", func(t testConfig) bool { return t.D.I.R.Base == 7 && t.D.I.R.Size == 5 }},
{"D.I.S", "a,b", func(t testConfig) bool { return reflect.DeepEqual(t.D.I.S, []string{"a", "b"}) }},
{"D.I.T", "foo", func(t testConfig) bool { return t.D.I.T == "foo" }},
{"D.I.U", "11.22.0.0/16", func(t testConfig) bool { return t.D.I.U.String() == "11.22.0.0/16" }},
{"D.I.V", "5s", func(t testConfig) bool { return t.D.I.V == 5*time.Second }},
} {
a := buildConfig()
if err := FindAndSet(tc.path, &a, tc.newval); err != nil {
t.Fatalf("Error setting value: %v", err)
}
if !tc.checker(a) {
t.Fatalf("Error, values not correct: %v, %s, %s", a, tc.newval, tc.path)
}
}
}

View File

@ -33,39 +33,20 @@ pygmentsStyle = "tango"
[permalinks]
blog = "/:section/:year/:month/:day/:slug/"
[module]
[[module.mounts]]
source = "../deploy/addons/gvisor/"
target = "content/gvisor/"
[[module.mounts]]
source = "../deploy/addons/helm-tiller/"
target = "content/helm-tiller/"
[[module.mounts]]
source = "../deploy/addons/istio/"
target = "content/istio/"
[[module.mounts]]
source = "../deploy/addons/ingress-dns/"
target = "content/ingress-dns/"
[[module.mounts]]
source = "../deploy/addons/storage-provisioner-gluster/"
target = "content/storage-provisioner-gluster/"
[[module.mounts]]
source = "../deploy/addons/layouts/"
target = "layouts"
[markup]
[markup.highlight]
codeFences = true
hl_Lines = ""
lineNoStart = 1
lineNos = false
lineNumbersInTable = true
noClasses = true
style = "vs"
tabWidth = 4
[[module.mounts]]
source = "content/en"
target = "content"
[[module.mounts]]
source = "layouts"
target = "layouts"
## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday
[blackfriday]
plainIDAnchors = true
hrefTargetBlank = true
angledQuotes = false
latexDashes = true
# allow html in markdown
[markup.goldmark.renderer]
unsafe=true
# Image processing configuration.
[imaging]

View File

@ -18,7 +18,10 @@ To serve documentation pages locally, clone the `minikube` repository and run:
`make site`
NOTE: On Windows, our site currently causes Hugo to `panic`.
Notes :
* On GNU/Linux, golang package shipped with the distribution may not be recent enough. Use the latest version.
* On Windows, our site currently causes Hugo to `panic`.
## Lint

View File

@ -0,0 +1,8 @@
---
title: "FAQ"
linkTitle: "FAQ"
weight: 5
description: >
Questions that come up regularly
---

View File

@ -0,0 +1,20 @@
---
title: "Sudo prompts"
linkTitle: "Sudo prompts"
weight: 1
date: 2020-03-26
description: >
Disabling sudo prompts when using minikude start/stop/status, kubectl cluster-info, ...
---
## Use the `docker` driver
Use the `docker` driver rather than the `none` driver. `docker` driver should be used unless it does not meet requirements for some reason.
## For `none` users
For `none` users, `CHANGE_MINIKUBE_NONE_USER=true`, kubectl and such will still work: [see environment variables](https://minikube.sigs.k8s.io/docs/reference/environment_variables/)
## Otherwise deal with `sudo`
Configure `sudo` to never prompt for the commands issued by minikube.

View File

@ -13,9 +13,9 @@ For example the `minikube start --iso-url="$ISO_URL"` flag can also be set by se
## Other variables
Some features can only be accessed by environment variables, here is a list of these features:
Some features can only be accessed by minikube specific environment variables, here is a list of these features:
* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration
* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration. *Please note: this is used only by minikube and does not affect anything related to Kubernetes tools such as kubectl.*
* **MINIKUBE_IN_STYLE** - (bool) manually sets whether or not emoji and colors should appear in minikube. Set to false or 0 to disable this feature, true or 1 to force it to be turned on.

Some files were not shown because too many files have changed in this diff Show More