diff --git a/Makefile b/Makefile index b76b36610b..01f2a4efb5 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,7 @@ MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download KERNEL_VERSION ?= 4.19.107 # latest from https://github.com/golangci/golangci-lint/releases -GOLINT_VERSION ?= v1.29.0 +GOLINT_VERSION ?= v1.30.0 # Limit number of default jobs, to avoid the CI builds running out of memory GOLINT_JOBS ?= 4 # see https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index 30579d45fa..ff1ab11568 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -23,6 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/node" + "k8s.io/minikube/pkg/minikube/reason" ) // cacheImageConfigKey is the config field name used to store which images we have previously cached @@ -43,11 +44,11 @@ var addCacheCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { // Cache and load images into docker daemon if err := machine.CacheAndLoadImages(args); err != nil { - exit.WithError("Failed to cache and load images", err) + exit.Error(reason.InternalCacheLoad, "Failed to cache and load images", err) } // Add images to config file if err := cmdConfig.AddToConfigMap(cacheImageConfigKey, args); err != nil { - exit.WithError("Failed to update config", err) + exit.Error(reason.InternalAddConfig, "Failed to update config", err) } }, } @@ -60,11 +61,11 @@ var deleteCacheCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { // Delete images from config file if err := cmdConfig.DeleteFromConfigMap(cacheImageConfigKey, args); err != nil { - exit.WithError("Failed to delete images from config", err) + exit.Error(reason.InternalDelConfig, "Failed to delete images from config", err) } // Delete images from cache/images directory if err := image.DeleteFromCacheDir(args); err != nil { 
- exit.WithError("Failed to delete images", err) + exit.Error(reason.HostDelCache, "Failed to delete images", err) } }, } @@ -77,7 +78,7 @@ var reloadCacheCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { err := node.CacheAndLoadImagesInConfig() if err != nil { - exit.WithError("Failed to reload cached images", err) + exit.Error(reason.GuestCacheLoad, "Failed to reload cached images", err) } }, } diff --git a/cmd/minikube/cmd/cache_list.go b/cmd/minikube/cmd/cache_list.go index d557733c4f..1c7341c267 100644 --- a/cmd/minikube/cmd/cache_list.go +++ b/cmd/minikube/cmd/cache_list.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/reason" ) const defaultCacheListFormat = "{{.CacheImage}}\n" @@ -42,10 +43,10 @@ var listCacheCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { images, err := cmdConfig.ListConfigMap(cacheImageConfigKey) if err != nil { - exit.WithError("Failed to get image map", err) + exit.Error(reason.InternalListConfig, "Failed to get image map", err) } if err := cacheList(images); err != nil { - exit.WithError("Failed to list cached images", err) + exit.Error(reason.InternalCacheList, "Failed to list cached images", err) } }, } diff --git a/cmd/minikube/cmd/completion.go b/cmd/minikube/cmd/completion.go index 35e359e188..c2e083e6a5 100644 --- a/cmd/minikube/cmd/completion.go +++ b/cmd/minikube/cmd/completion.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" ) const longDescription = ` @@ -73,27 +74,26 @@ var completionCmd = &cobra.Command{ Long: longDescription, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("Usage: minikube completion SHELL") + exit.Message(reason.Usage, "Usage: minikube completion SHELL") } if args[0] != "bash" && args[0] != 
"zsh" && args[0] != "fish" { - exit.UsageT("Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]}) + exit.Message(reason.Usage, "Sorry, completion support is not yet implemented for {{.name}}", out.V{"name": args[0]}) } else if args[0] == "bash" { err := GenerateBashCompletion(os.Stdout, cmd.Parent()) if err != nil { - exit.WithError("bash completion failed", err) + exit.Error(reason.InternalCompletion, "bash completion failed", err) } } else if args[0] == "zsh" { err := GenerateZshCompletion(os.Stdout, cmd.Parent()) if err != nil { - exit.WithError("zsh completion failed", err) + exit.Error(reason.InternalCompletion, "zsh completion failed", err) } } else { err := GenerateFishCompletion(os.Stdout, cmd.Parent()) if err != nil { - exit.WithError("fish completion failed", err) + exit.Error(reason.InternalCompletion, "fish completion failed", err) } } - }, } diff --git a/cmd/minikube/cmd/config/addons_list.go b/cmd/minikube/cmd/config/addons_list.go index 521182f226..f418cdf173 100644 --- a/cmd/minikube/cmd/config/addons_list.go +++ b/cmd/minikube/cmd/config/addons_list.go @@ -31,6 +31,8 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var addonListOutput string @@ -47,7 +49,7 @@ var addonsListCmd = &cobra.Command{ Long: "Lists all available minikube addons as well as their current statuses (enabled/disabled)", Run: func(cmd *cobra.Command, args []string) { if len(args) != 0 { - exit.UsageT("usage: minikube addons list") + exit.Message(reason.Usage, "usage: minikube addons list") } _, cc := mustload.Partial(ClusterFlagValue()) @@ -57,7 +59,7 @@ var addonsListCmd = &cobra.Command{ case "json": printAddonsJSON(cc) default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'list', 'json'", addonListOutput)) + exit.Message(reason.Usage, fmt.Sprintf("invalid output format: %s. Valid values: 'list', 'json'", addonListOutput)) } }, } @@ -115,7 +117,7 @@ var printAddonsList = func(cc *config.ClusterConfig) { glog.Errorf("list profiles returned error: %v", err) } if len(v) > 1 { - out.T(out.Tip, "To see addons list for other profiles use: `minikube addons -p name list`") + out.T(style.Tip, "To see addons list for other profiles use: `minikube addons -p name list`") } } diff --git a/cmd/minikube/cmd/config/configure.go b/cmd/minikube/cmd/config/configure.go index e3fa9fcd52..25679e2d38 100644 --- a/cmd/minikube/cmd/config/configure.go +++ b/cmd/minikube/cmd/config/configure.go @@ -25,7 +25,9 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/service" + "k8s.io/minikube/pkg/minikube/style" ) var addonsConfigureCmd = &cobra.Command{ @@ -34,7 +36,7 @@ var addonsConfigureCmd = &cobra.Command{ Long: "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). 
For a list of available addons use: minikube addons list ", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("usage: minikube addons configure ADDON_NAME") + exit.Message(reason.Usage, "usage: minikube addons configure ADDON_NAME") } addon := args[0] @@ -123,7 +125,6 @@ var addonsConfigureCmd = &cobra.Command{ "cloud": "ecr", "kubernetes.io/minikube-addons": "registry-creds", }) - if err != nil { out.FailureT("ERROR creating `registry-creds-ecr` secret: {{.error}}", out.V{"error": err}) } @@ -204,7 +205,7 @@ var addonsConfigureCmd = &cobra.Command{ } if err := config.SaveProfile(profile, cfg); err != nil { - out.ErrT(out.FatalType, "Failed to save config {{.profile}}", out.V{"profile": profile}) + out.ErrT(style.Fatal, "Failed to save config {{.profile}}", out.V{"profile": profile}) } default: diff --git a/cmd/minikube/cmd/config/disable.go b/cmd/minikube/cmd/config/disable.go index 76d6204c0d..f235adb6a6 100644 --- a/cmd/minikube/cmd/config/disable.go +++ b/cmd/minikube/cmd/config/disable.go @@ -21,6 +21,8 @@ import ( "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var addonsDisableCmd = &cobra.Command{ @@ -29,18 +31,18 @@ var addonsDisableCmd = &cobra.Command{ Long: "Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). For a list of available addons use: minikube addons list ", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("usage: minikube addons disable ADDON_NAME") + exit.Message(reason.Usage, "usage: minikube addons disable ADDON_NAME") } addon := args[0] if addon == "heapster" { - exit.WithCodeT(exit.Unavailable, "The heapster addon is depreciated. please try to disable metrics-server instead") + exit.Message(reason.AddonUnsupported, "The heapster addon is depreciated. 
Please try to disable metrics-server instead") } err := addons.SetAndSave(ClusterFlagValue(), addon, "false") if err != nil { - exit.WithError("disable failed", err) + exit.Error(reason.InternalDisable, "disable failed", err) } - out.T(out.AddonDisable, `"The '{{.minikube_addon}}' addon is disabled`, out.V{"minikube_addon": addon}) + out.T(style.AddonDisable, `The '{{.minikube_addon}}' addon is disabled`, out.V{"minikube_addon": addon}) }, } diff --git a/cmd/minikube/cmd/config/enable.go b/cmd/minikube/cmd/config/enable.go index d9477b48e4..7eb25b9d5b 100644 --- a/cmd/minikube/cmd/config/enable.go +++ b/cmd/minikube/cmd/config/enable.go @@ -24,6 +24,8 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var addonsEnableCmd = &cobra.Command{ @@ -32,24 +34,24 @@ var addonsEnableCmd = &cobra.Command{ Long: "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). 
For a list of available addons use: minikube addons list ", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("usage: minikube addons enable ADDON_NAME") + exit.Message(reason.Usage, "usage: minikube addons enable ADDON_NAME") } addon := args[0] // replace heapster as metrics-server because heapster is deprecated if addon == "heapster" { - out.T(out.Waiting, "enable metrics-server addon instead of heapster addon because heapster is deprecated") + out.T(style.Waiting, "enable metrics-server addon instead of heapster addon because heapster is deprecated") addon = "metrics-server" } err := addons.SetAndSave(ClusterFlagValue(), addon, "true") if err != nil { - exit.WithError("enable failed", err) + exit.Error(reason.InternalEnable, "enable failed", err) } if addon == "dashboard" { tipProfileArg := "" if ClusterFlagValue() != constants.DefaultClusterName { tipProfileArg = fmt.Sprintf(" -p %s", ClusterFlagValue()) } - out.T(out.Tip, `Some dashboard features require the metrics-server addon. To enable all features please run: + out.T(style.Tip, `Some dashboard features require the metrics-server addon. 
To enable all features please run: minikube{{.profileArg}} addons enable metrics-server @@ -57,7 +59,7 @@ var addonsEnableCmd = &cobra.Command{ } - out.T(out.AddonEnable, "The '{{.addonName}}' addon is enabled", out.V{"addonName": addon}) + out.T(style.AddonEnable, "The '{{.addonName}}' addon is enabled", out.V{"addonName": addon}) }, } diff --git a/cmd/minikube/cmd/config/open.go b/cmd/minikube/cmd/config/open.go index 070fce6fc1..e6521dc16b 100644 --- a/cmd/minikube/cmd/config/open.go +++ b/cmd/minikube/cmd/config/open.go @@ -26,7 +26,9 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/service" + "k8s.io/minikube/pkg/minikube/style" ) var ( @@ -47,13 +49,13 @@ var addonsOpenCmd = &cobra.Command{ PreRun: func(cmd *cobra.Command, args []string) { t, err := template.New("addonsURL").Parse(addonsURLFormat) if err != nil { - exit.UsageT("The value passed to --format is invalid: {{.error}}", out.V{"error": err}) + exit.Message(reason.Usage, "The value passed to --format is invalid: {{.error}}", out.V{"error": err}) } addonsURLTemplate = t }, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("usage: minikube addons open ADDON_NAME") + exit.Message(reason.Usage, "usage: minikube addons open ADDON_NAME") } addonName := args[0] @@ -62,14 +64,14 @@ var addonsOpenCmd = &cobra.Command{ addon, ok := assets.Addons[addonName] // validate addon input if !ok { - exit.WithCodeT(exit.Data, `addon '{{.name}}' is not a valid addon packaged with minikube. + exit.Message(reason.Usage, `addon '{{.name}}' is not a valid addon packaged with minikube. To see the list of available addons run: minikube addons list`, out.V{"name": addonName}) } enabled := addon.IsEnabled(co.Config) if !enabled { - exit.WithCodeT(exit.Unavailable, `addon '{{.name}}' is currently not enabled. 
+ exit.Message(reason.AddonNotEnabled, `addon '{{.name}}' is currently not enabled. To enable this addon run: minikube addons enable {{.name}}`, out.V{"name": addonName}) } @@ -79,10 +81,10 @@ minikube addons enable {{.name}}`, out.V{"name": addonName}) serviceList, err := service.GetServiceListByLabel(cname, namespace, key, addonName) if err != nil { - exit.WithCodeT(exit.Unavailable, "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}", out.V{"namespace": namespace, "labelName": key, "addonName": addonName, "error": err}) + exit.Message(reason.SvcList, "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}", out.V{"namespace": namespace, "labelName": key, "addonName": addonName, "error": err}) } if len(serviceList.Items) == 0 { - exit.WithCodeT(exit.Config, `This addon does not have an endpoint defined for the 'addons open' command. + exit.Message(reason.SvcNotFound, `This addon does not have an endpoint defined for the 'addons open' command. 
You can add one by annotating a service with the label {{.labelName}}:{{.addonName}}`, out.V{"labelName": key, "addonName": addonName}) } for i := range serviceList.Items { @@ -90,14 +92,14 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa var urlString []string if urlString, err = service.WaitForService(co.API, co.Config.Name, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { - exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err}) + exit.Message(reason.SvcTimeout, "Wait failed: {{.error}}", out.V{"error": err}) } if len(urlString) != 0 { - out.T(out.Celebrate, "Opening Kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc}) + out.T(style.Celebrate, "Opening Kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc}) for _, url := range urlString { if err := browser.OpenURL(url); err != nil { - exit.WithError(fmt.Sprintf("browser failed to open url %s", url), err) + exit.Error(reason.HostBrowser, fmt.Sprintf("browser failed to open url %s", url), err) } } } diff --git a/cmd/minikube/cmd/config/profile.go b/cmd/minikube/cmd/config/profile.go index 8f5bef20ab..926229d386 100644 --- a/cmd/minikube/cmd/config/profile.go +++ b/cmd/minikube/cmd/config/profile.go @@ -25,6 +25,8 @@ import ( "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) // ProfileCmd represents the profile command @@ -35,26 +37,26 @@ var ProfileCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { profile := ClusterFlagValue() - out.T(out.Empty, profile) + out.T(style.Empty, profile) os.Exit(0) } if len(args) > 1 { - exit.UsageT("usage: minikube profile 
[MINIKUBE_PROFILE_NAME]") + exit.Message(reason.Usage, "usage: minikube profile [MINIKUBE_PROFILE_NAME]") } profile := args[0] // Check whether the profile name is container friendly if !config.ProfileNameValid(profile) { out.WarningT("Profile name '{{.profilename}}' is not valid", out.V{"profilename": profile}) - exit.UsageT("Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.") + exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.") } /** we need to add code over here to check whether the profile name is in the list of reserved keywords */ if config.ProfileNameInReservedKeywords(profile) { - exit.WithCodeT(exit.Config, `Profile name "{{.profilename}}" is reserved keyword. To delete this profile, run: "{{.cmd}}"`, out.V{"profilename": profile, "cmd": mustload.ExampleCmd(profile, "delete")}) + exit.Message(reason.InternalReservedProfile, `Profile name "{{.profilename}}" is reserved keyword. 
To delete this profile, run: "{{.cmd}}"`, out.V{"profilename": profile, "cmd": mustload.ExampleCmd(profile, "delete")}) } if profile == "default" { @@ -68,18 +70,18 @@ var ProfileCmd = &cobra.Command{ } if !config.ProfileExists(profile) { - out.ErrT(out.Tip, `if you want to create a profile you can by this command: minikube start -p {{.profile_name}}`, out.V{"profile_name": profile}) + out.ErrT(style.Tip, `if you want to create a profile you can by this command: minikube start -p {{.profile_name}}`, out.V{"profile_name": profile}) os.Exit(0) } err := Set(config.ProfileName, profile) if err != nil { - exit.WithError("Setting profile failed", err) + exit.Error(reason.InternalConfigSet, "Setting profile failed", err) } cc, err := config.Load(profile) // might err when loading older version of cfg file that doesn't have KeepContext field if err != nil && !config.IsNotExist(err) { - out.ErrT(out.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err}) + out.ErrT(style.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err}) } if err == nil { if cc.KeepContext { @@ -88,7 +90,7 @@ var ProfileCmd = &cobra.Command{ } else { err := kubeconfig.SetCurrentContext(profile, kubeconfig.PathFromEnv()) if err != nil { - out.ErrT(out.Sad, `Error while setting kubectl current context : {{.error}}`, out.V{"error": err}) + out.ErrT(style.Sad, `Error while setting kubectl current context : {{.error}}`, out.V{"error": err}) } } out.SuccessT("minikube profile was successfully set to {{.profile_name}}", out.V{"profile_name": profile}) diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index 3a7cfa6069..86e30b9eed 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -28,36 +28,33 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + 
"k8s.io/minikube/pkg/minikube/style" "github.com/golang/glog" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" ) -var ( - output string -) +var output string var profileListCmd = &cobra.Command{ Use: "list", Short: "Lists all minikube profiles.", Long: "Lists all valid minikube profiles and detects all possible invalid profiles.", Run: func(cmd *cobra.Command, args []string) { - switch strings.ToLower(output) { case "json": printProfilesJSON() case "table": printProfilesTable() default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'table', 'json'", output)) + exit.Message(reason.Usage, fmt.Sprintf("invalid output format: %s. Valid values: 'table', 'json'", output)) } - }, } var printProfilesTable = func() { - var validData [][]string table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status"}) @@ -67,7 +64,7 @@ var printProfilesTable = func() { validProfiles, invalidProfiles, err := config.ListProfiles() if len(validProfiles) == 0 || err != nil { - exit.UsageT("No minikube profile was found. You can create one using `minikube start`.") + exit.Message(reason.Usage, "No minikube profile was found. You can create one using `minikube start`.") } api, err := machine.NewAPIClient() if err != nil { @@ -78,7 +75,7 @@ var printProfilesTable = func() { for _, p := range validProfiles { cp, err := config.PrimaryControlPlane(p.Config) if err != nil { - exit.WithError("error getting primary control plane", err) + exit.Error(reason.GuestCpConfig, "error getting primary control plane", err) } p.Status, err = machine.Status(api, driver.MachineName(*p.Config, cp)) if err != nil { @@ -93,9 +90,9 @@ var printProfilesTable = func() { if invalidProfiles != nil { out.WarningT("Found {{.number}} invalid profile(s) ! 
", out.V{"number": len(invalidProfiles)}) for _, p := range invalidProfiles { - out.ErrT(out.Empty, "\t "+p.Name) + out.ErrT(style.Empty, "\t "+p.Name) } - out.ErrT(out.Tip, "You can delete them using the following command(s): ") + out.ErrT(style.Tip, "You can delete them using the following command(s): ") for _, p := range invalidProfiles { out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name)) } @@ -105,7 +102,6 @@ var printProfilesTable = func() { if err != nil { glog.Warningf("error loading profiles: %v", err) } - } var printProfilesJSON = func() { @@ -119,7 +115,7 @@ var printProfilesJSON = func() { for _, v := range validProfiles { cp, err := config.PrimaryControlPlane(v.Config) if err != nil { - exit.WithError("error getting primary control plane", err) + exit.Error(reason.GuestCpConfig, "error getting primary control plane", err) } status, err := machine.Status(api, driver.MachineName(*v.Config, cp)) if err != nil { @@ -143,7 +139,7 @@ var printProfilesJSON = func() { invalid = []*config.Profile{} } - var body = map[string]interface{}{} + body := map[string]interface{}{} if err == nil || config.IsNotExist(err) { body["valid"] = valid @@ -154,7 +150,7 @@ var printProfilesJSON = func() { body["error"] = err jsonString, _ := json.Marshal(body) out.String(string(jsonString)) - os.Exit(exit.Failure) + os.Exit(reason.ExGuestError) } } diff --git a/cmd/minikube/cmd/config/set.go b/cmd/minikube/cmd/config/set.go index b074af6aef..b7422c9e95 100644 --- a/cmd/minikube/cmd/config/set.go +++ b/cmd/minikube/cmd/config/set.go @@ -23,6 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" ) var configSetCmd = &cobra.Command{ @@ -32,14 +33,14 @@ var configSetCmd = &cobra.Command{ These values can be overwritten by flags or environment variables at runtime.`, Run: func(cmd *cobra.Command, args []string) { if len(args) < 2 { - exit.UsageT("not enough 
arguments ({{.ArgCount}}).\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE", out.V{"ArgCount": len(args)}) + exit.Message(reason.Usage, "not enough arguments ({{.ArgCount}}).\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE", out.V{"ArgCount": len(args)}) } if len(args) > 2 { - exit.UsageT("toom any arguments ({{.ArgCount}}).\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE", out.V{"ArgCount": len(args)}) + exit.Message(reason.Usage, "too many arguments ({{.ArgCount}}).\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE", out.V{"ArgCount": len(args)}) } err := Set(args[0], args[1]) if err != nil { - exit.WithError("Set failed", err) + exit.Error(reason.InternalConfigSet, "Set failed", err) } }, } diff --git a/cmd/minikube/cmd/config/unset.go b/cmd/minikube/cmd/config/unset.go index 122d8ca828..b3581abd37 100644 --- a/cmd/minikube/cmd/config/unset.go +++ b/cmd/minikube/cmd/config/unset.go @@ -21,6 +21,7 @@ import ( config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/reason" ) var configUnsetCmd = &cobra.Command{ @@ -29,11 +30,11 @@ var configUnsetCmd = &cobra.Command{ Long: "unsets PROPERTY_NAME from the minikube config file. 
Can be overwritten by flags or environmental variables", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { - exit.UsageT("usage: minikube config unset PROPERTY_NAME") + exit.Message(reason.Usage, "usage: minikube config unset PROPERTY_NAME") } err := Unset(args[0]) if err != nil { - exit.WithError("unset failed", err) + exit.Error(reason.InternalConfigUnset, "unset failed", err) } }, } diff --git a/cmd/minikube/cmd/config/view.go b/cmd/minikube/cmd/config/view.go index 66418e68dc..a65574079d 100644 --- a/cmd/minikube/cmd/config/view.go +++ b/cmd/minikube/cmd/config/view.go @@ -24,6 +24,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/reason" ) const defaultConfigViewFormat = "- {{.ConfigKey}}: {{.ConfigValue}}\n" @@ -43,7 +44,7 @@ var configViewCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { err := View() if err != nil { - exit.WithError("config view failed", err) + exit.Error(reason.InternalConfigView, "config view failed", err) } }, } @@ -64,12 +65,12 @@ func View() error { for k, v := range cfg { tmpl, err := template.New("view").Parse(viewFormat) if err != nil { - exit.WithError("Error creating view template", err) + exit.Error(reason.InternalViewTmpl, "Error creating view template", err) } viewTmplt := ViewTemplate{k, v} err = tmpl.Execute(os.Stdout, viewTmplt) if err != nil { - exit.WithError("Error executing view template", err) + exit.Error(reason.InternalViewExec, "Error executing view template", err) } } return nil diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index 31516e0c73..e660e03452 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -31,12 +31,14 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/browser" 
"k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/util/retry" ) @@ -72,47 +74,47 @@ var dashboardCmd = &cobra.Command{ if !enabled { // Send status messages to stderr for folks re-using this output. - out.ErrT(out.Enabling, "Enabling dashboard ...") + out.ErrT(style.Enabling, "Enabling dashboard ...") // Enable the dashboard add-on err = addons.SetAndSave(cname, "dashboard", "true") if err != nil { - exit.WithError("Unable to enable dashboard", err) + exit.Error(reason.InternalAddonEnable, "Unable to enable dashboard", err) } } ns := "kubernetes-dashboard" svc := "kubernetes-dashboard" - out.ErrT(out.Verifying, "Verifying dashboard health ...") + out.ErrT(style.Verifying, "Verifying dashboard health ...") checkSVC := func() error { return service.CheckService(cname, ns, svc) } // for slow machines or parallels in CI to avoid #7503 if err = retry.Expo(checkSVC, 100*time.Microsecond, time.Minute*10); err != nil { - exit.WithCodeT(exit.Unavailable, "dashboard service is not running: {{.error}}", out.V{"error": err}) + exit.Message(reason.SvcCheckTimeout, "dashboard service is not running: {{.error}}", out.V{"error": err}) } - out.ErrT(out.Launch, "Launching proxy ...") + out.ErrT(style.Launch, "Launching proxy ...") p, hostPort, err := kubectlProxy(kubectlVersion, cname) if err != nil { - exit.WithError("kubectl proxy", err) + exit.Error(reason.HostKubectlProxy, "kubectl proxy", err) } url := dashboardURL(hostPort, ns, svc) - out.ErrT(out.Verifying, "Verifying proxy health ...") + out.ErrT(style.Verifying, "Verifying proxy health ...") chkURL := func() error { return checkURL(url) } if err = retry.Expo(chkURL, 100*time.Microsecond, 10*time.Minute); err != nil { - exit.WithCodeT(exit.Unavailable, "{{.url}} is not accessible: {{.error}}", out.V{"url": url, "error": err}) + 
exit.Message(reason.SvcURLTimeout, "{{.url}} is not accessible: {{.error}}", out.V{"url": url, "error": err}) } - //check if current user is root + // check if current user is root user, err := user.Current() if err != nil { - exit.WithError("Unable to get current user", err) + exit.Error(reason.HostCurrentUser, "Unable to get current user", err) } if dashboardURLMode || user.Uid == "0" { out.Ln(url) } else { - out.T(out.Celebrate, "Opening {{.url}} in your default browser...", out.V{"url": url}) + out.T(style.Celebrate, "Opening {{.url}} in your default browser...", out.V{"url": url}) if err = browser.OpenURL(url); err != nil { - exit.WithCodeT(exit.Software, "failed to open browser: {{.error}}", out.V{"error": err}) + exit.Message(reason.HostBrowser, "failed to open browser: {{.error}}", out.V{"error": err}) } } diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 98ed5df2e3..33a31488c1 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -45,10 +45,14 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) -var deleteAll bool -var purge bool +var ( + deleteAll bool + purge bool +) // deleteCmd represents the delete command var deleteCmd = &cobra.Command{ @@ -85,7 +89,7 @@ func init() { deleteCmd.Flags().BoolVar(&purge, "purge", false, "Set this flag to delete the '.minikube' folder from your user directory.") if err := viper.BindPFlags(deleteCmd.Flags()); err != nil { - exit.WithError("unable to bind flags", err) + exit.Error(reason.InternalBindFlags, "unable to bind flags", err) } RootCmd.AddCommand(deleteCmd) } @@ -124,9 +128,9 @@ func deleteContainersAndVolumes(ociBin string) { // runDelete handles the executes the flow of "minikube delete" func runDelete(cmd *cobra.Command, args []string) { if len(args) > 0 { - exit.UsageT("Usage: minikube delete") + 
exit.Message(reason.Usage, "Usage: minikube delete") } - //register.SetEventLogPath(localpath.EventLog(ClusterFlagValue())) + // register.SetEventLogPath(localpath.EventLog(ClusterFlagValue())) register.Reg.SetStep(register.Deleting) validProfiles, invalidProfiles, err := config.ListProfiles() @@ -137,11 +141,11 @@ func runDelete(cmd *cobra.Command, args []string) { // in the case user has more than 1 profile and runs --purge // to prevent abandoned VMs/containers, force user to run with delete --all if purge && len(profilesToDelete) > 1 && !deleteAll { - out.ErrT(out.Notice, "Multiple minikube profiles were found - ") + out.ErrT(style.Notice, "Multiple minikube profiles were found - ") for _, p := range profilesToDelete { - out.T(out.Notice, " - {{.profile}}", out.V{"profile": p.Name}) + out.T(style.Notice, " - {{.profile}}", out.V{"profile": p.Name}) } - exit.UsageT("Usage: minikube delete --all --purge") + exit.Message(reason.Usage, "Usage: minikube delete --all --purge") } if deleteAll { @@ -154,11 +158,11 @@ func runDelete(cmd *cobra.Command, args []string) { if len(errs) > 0 { HandleDeletionErrors(errs) } else { - out.T(out.DeletingHost, "Successfully deleted all profiles") + out.T(style.DeletingHost, "Successfully deleted all profiles") } } else { if len(args) > 0 { - exit.UsageT("usage: minikube delete") + exit.Message(reason.Usage, "usage: minikube delete") } cname := ClusterFlagValue() @@ -166,7 +170,7 @@ func runDelete(cmd *cobra.Command, args []string) { orphan := false if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname}) + out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname}) orphan = true } @@ -193,9 +197,9 @@ func runDelete(cmd *cobra.Command, args []string) { func purgeMinikubeDirectory() { glog.Infof("Purging the '.minikube' directory located at %s", localpath.MiniPath()) if err := os.RemoveAll(localpath.MiniPath()); err != nil { - 
exit.WithError("unable to delete minikube config folder", err) + exit.Error(reason.HostPurge, "unable to delete minikube config folder", err) } - out.T(out.Deleted, "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]", out.V{"minikubeDirectory": localpath.MiniPath()}) + out.T(style.Deleted, "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]", out.V{"minikubeDirectory": localpath.MiniPath()}) } // DeleteProfiles deletes one or more profiles @@ -204,7 +208,6 @@ func DeleteProfiles(profiles []*config.Profile) []error { var errs []error for _, profile := range profiles { err := deleteProfile(profile) - if err != nil { mm, loadErr := machine.LoadMachine(profile.Name) @@ -244,7 +247,7 @@ func deletePossibleKicLeftOver(cname string, driverName string) { cs, err := oci.ListContainersByLabel(bin, delLabel) if err == nil && len(cs) > 0 { for _, c := range cs { - out.T(out.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": cname}) + out.T(style.DeletingHost, `Deleting container "{{.name}}" ...`, out.V{"name": cname}) err := oci.DeleteContainer(bin, c) if err != nil { // it will error if there is no container to delete glog.Errorf("error deleting container %q. 
You may want to delete it manually :\n%v", cname, err) @@ -279,7 +282,7 @@ func deleteProfile(profile *config.Profile) error { // if driver is oci driver, delete containers and volumes if driver.IsKIC(profile.Config.Driver) { - out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver}) + out.T(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver}) for _, n := range profile.Config.Nodes { machineName := driver.MachineName(*profile.Config, n) deletePossibleKicLeftOver(machineName, profile.Config.Driver) @@ -330,7 +333,7 @@ func deleteProfile(profile *config.Profile) error { if err := deleteContext(profile.Name); err != nil { return err } - out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name}) + out.T(style.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name}) return nil } @@ -346,7 +349,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) { glog.Infof("Host %s does not exist. 
Proceeding ahead with cleanup.", machineName) default: out.FailureT("Failed to delete cluster: {{.error}}", out.V{"error": err}) - out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName}) + out.T(style.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName}) } } } @@ -377,7 +380,7 @@ func deleteContext(machineName string) error { } func deleteInvalidProfile(profile *config.Profile) []error { - out.T(out.DeletingHost, "Trying to delete invalid profile {{.profile}}", out.V{"profile": profile.Name}) + out.T(style.DeletingHost, "Trying to delete invalid profile {{.profile}}", out.V{"profile": profile.Name}) var errs []error pathToProfile := config.ProfileFolderPath(profile.Name, localpath.MiniPath()) @@ -403,7 +406,7 @@ func profileDeletionErr(cname string, additionalInfo string) error { } func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error { - out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName}) + out.T(style.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName}) host, err := machine.LoadHost(api, driver.MachineName(cc, n)) if err != nil { return DeletionError{Err: fmt.Errorf("unable to load host: %v", err), Errtype: MissingCluster} @@ -453,19 +456,19 @@ func handleSingleDeletionError(err error) { case Fatal: out.FatalT(deletionError.Error()) case MissingProfile: - out.ErrT(out.Sad, deletionError.Error()) + out.ErrT(style.Sad, deletionError.Error()) case MissingCluster: - out.ErrT(out.Meh, deletionError.Error()) + out.ErrT(style.Meh, deletionError.Error()) default: out.FatalT(deletionError.Error()) } } else { - 
exit.WithError("Could not process error from failed deletion", err) + exit.Error(reason.GuestDeletion, "Could not process error from failed deletion", err) } } func handleMultipleDeletionErrors(errors []error) { - out.ErrT(out.Sad, "Multiple errors deleting profiles") + out.ErrT(style.Sad, "Multiple errors deleting profiles") for _, err := range errors { deletionError, ok := err.(DeletionError) @@ -473,7 +476,7 @@ func handleMultipleDeletionErrors(errors []error) { if ok { glog.Errorln(deletionError.Error()) } else { - exit.WithError("Could not process errors from failed deletion", err) + exit.Error(reason.GuestDeletion, "Could not process errors from failed deletion", err) } } } @@ -481,10 +484,10 @@ func handleMultipleDeletionErrors(errors []error) { func deleteProfileDirectory(profile string) { machineDir := filepath.Join(localpath.MiniPath(), "machines", profile) if _, err := os.Stat(machineDir); err == nil { - out.T(out.DeletingHost, `Removing {{.directory}} ...`, out.V{"directory": machineDir}) + out.T(style.DeletingHost, `Removing {{.directory}} ...`, out.V{"directory": machineDir}) err := os.RemoveAll(machineDir) if err != nil { - exit.WithError("Unable to remove machine directory", err) + exit.Error(reason.GuestProfileDeletion, "Unable to remove machine directory", err) } } } diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 56e4ea3a17..9fa12ce63f 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -38,6 +38,7 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/shell" "k8s.io/minikube/pkg/minikube/sysinit" ) @@ -73,7 +74,7 @@ type EnvNoProxyGetter struct{} func dockerShellCfgSet(ec DockerEnvConfig, envMap map[string]string) *DockerShellConfig { profile := ec.profile const usgPlz = "To point your shell to minikube's docker-daemon, run:" - var usgCmd 
= fmt.Sprintf("minikube -p %s docker-env", profile) + usgCmd := fmt.Sprintf("minikube -p %s docker-env", profile) s := &DockerShellConfig{ Config: *shell.CfgSet(ec.EnvConfig, usgPlz, usgCmd), } @@ -123,7 +124,7 @@ func isDockerActive(r command.Runner) bool { func mustRestartDocker(name string, runner command.Runner) { if err := sysinit.New(runner).Restart("docker"); err != nil { - exit.WithCodeT(exit.Unavailable, `The Docker service within '{{.name}}' is not active`, out.V{"name": name}) + exit.Message(reason.RuntimeRestart, `The Docker service within '{{.name}}' is not active`, out.V{"name": name}) } } @@ -139,7 +140,7 @@ var dockerEnvCmd = &cobra.Command{ if dockerUnset { if err := dockerUnsetScript(DockerEnvConfig{EnvConfig: sh}, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) + exit.Error(reason.InternalEnvScript, "Error generating unset output", err) } return } @@ -149,15 +150,15 @@ var dockerEnvCmd = &cobra.Command{ driverName := co.CP.Host.DriverName if driverName == driver.None { - exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) + exit.Message(reason.EnvDriverConflict, `'none' driver does not support 'minikube docker-env' command`) } if len(co.Config.Nodes) > 1 { - exit.WithCodeT(exit.BadUsage, `The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`) + exit.Message(reason.EnvMultiConflict, `The docker-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`) } if co.Config.KubernetesConfig.ContainerRuntime != "docker" { - exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, + exit.Message(reason.Usage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime}) } @@ -171,7 +172,7 @@ var dockerEnvCmd = &cobra.Command{ if driver.NeedsPortForward(driverName) { port, err = oci.ForwardedPort(driverName, cname, port) if err != nil { - exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err}) + exit.Message(reason.DrvPortForward, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err}) } } @@ -188,7 +189,7 @@ var dockerEnvCmd = &cobra.Command{ if ec.Shell == "" { ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error detecting shell", err) + exit.Error(reason.InternalShellDetect, "Error detecting shell", err) } } @@ -208,7 +209,7 @@ var dockerEnvCmd = &cobra.Command{ } if err := dockerSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) + exit.Error(reason.InternalDockerScript, "Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/generate-docs.go b/cmd/minikube/cmd/generate-docs.go index e87b7270a4..38f99f557b 100644 --- a/cmd/minikube/cmd/generate-docs.go +++ b/cmd/minikube/cmd/generate-docs.go @@ -23,6 +23,8 @@ import ( "k8s.io/minikube/pkg/generate" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var path string @@ -35,18 +37,17 @@ var generateDocs = 
&cobra.Command{ Example: "minikube generate-docs --path <FOLDER_PATH>", Hidden: true, Run: func(cmd *cobra.Command, args []string) { - // if directory does not exist docsPath, err := os.Stat(path) if err != nil || !docsPath.IsDir() { - exit.UsageT("Unable to generate the documentation. Please ensure that the path specified is a directory, exists & you have permission to write to it.") + exit.Message(reason.Usage, "Unable to generate the documentation. Please ensure that the path specified is a directory, exists & you have permission to write to it.") } // generate docs if err := generate.Docs(RootCmd, path); err != nil { - exit.WithError("Unable to generate docs", err) + exit.Error(reason.InternalGenerateDocs, "Unable to generate docs", err) } - out.T(out.Documentation, "Docs have been saved at - {{.path}}", out.V{"path": path}) + out.T(style.Documentation, "Docs have been saved at - {{.path}}", out.V{"path": path}) }, } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 5b5708f296..4c19501b1f 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -28,6 +28,7 @@ import ( "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" ) const ( @@ -55,17 +56,17 @@ var logsCmd = &cobra.Command{ bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, co.CP.Runner) if err != nil { - exit.WithError("Error getting cluster bootstrapper", err) + exit.Error(reason.InternalBootstrapper, "Error getting cluster bootstrapper", err) } cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CP.Runner}) if err != nil { - exit.WithError("Unable to get runtime", err) + exit.Error(reason.InternalNewRuntime, "Unable to get runtime", err) } if followLogs { err := logs.Follow(cr, bs, *co.Config, co.CP.Runner) if err != nil { - exit.WithError("Follow", err) + exit.Error(reason.InternalLogFollow,
"Follow", err) } return } @@ -77,9 +78,9 @@ var logsCmd = &cobra.Command{ err = logs.Output(cr, bs, *co.Config, co.CP.Runner, numberOfLines) if err != nil { out.Ln("") - // Avoid exit.WithError, since it outputs the issue URL + // Avoid exit.Error, since it outputs the issue URL out.WarningT("{{.error}}", out.V{"error": err}) - os.Exit(exit.Unavailable) + os.Exit(reason.ExSvcError) } }, } diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 28ea923e7c..8d268d1cc2 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -35,6 +35,8 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/third_party/go9p/ufs" ) @@ -46,15 +48,17 @@ const ( ) // placeholders for flag values -var mountIP string -var mountVersion string -var mountType string -var isKill bool -var uid string -var gid string -var mSize int -var options []string -var mode uint +var ( + mountIP string + mountVersion string + mountType string + isKill bool + uid string + gid string + mSize int + options []string + mode uint +) // supportedFilesystems is a map of filesystem types to not warn against. 
var supportedFilesystems = map[string]bool{nineP: true} @@ -67,31 +71,31 @@ var mountCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { if isKill { if err := killMountProcess(); err != nil { - exit.WithError("Error killing mount process", err) + exit.Error(reason.HostKillMountProc, "Error killing mount process", err) } os.Exit(0) } if len(args) != 1 { - exit.UsageT(`Please specify the directory to be mounted: + exit.Message(reason.Usage, `Please specify the directory to be mounted: minikube mount <source directory>:<target directory> (example: "/host-home:/vm-home")`) } mountString := args[0] idx := strings.LastIndex(mountString, ":") if idx == -1 { // no ":" was present - exit.UsageT(`mount argument "{{.value}}" must be in form: <source directory>:<target directory>`, out.V{"value": mountString}) + exit.Message(reason.Usage, `mount argument "{{.value}}" must be in form: <source directory>:<target directory>`, out.V{"value": mountString}) } hostPath := mountString[:idx] vmPath := mountString[idx+1:] if _, err := os.Stat(hostPath); err != nil { if os.IsNotExist(err) { - exit.WithCodeT(exit.NoInput, "Cannot find directory {{.path}} for mount", out.V{"path": hostPath}) + exit.Message(reason.HostPathMissing, "Cannot find directory {{.path}} for mount", out.V{"path": hostPath}) } else { - exit.WithError("stat failed", err) + exit.Error(reason.HostPathStat, "stat failed", err) } } if len(vmPath) == 0 || !strings.HasPrefix(vmPath, "/") { - exit.UsageT("Target directory {{.path}} must be an absolute path", out.V{"path": vmPath}) + exit.Message(reason.Usage, "Target directory {{.path}} must be an absolute path", out.V{"path": vmPath}) } var debugVal int if glog.V(1) { @@ -100,7 +104,7 @@ var mountCmd = &cobra.Command{ co := mustload.Running(ClusterFlagValue()) if co.CP.Host.Driver.DriverName() == driver.None { - exit.UsageT(`'none' driver does not support 'minikube mount' command`) + exit.Message(reason.Usage, `'none' driver does not support 'minikube mount' command`) } var ip net.IP @@ -108,17 +112,17 @@ var mountCmd = &cobra.Command{ if mountIP == "" { ip, err
= cluster.HostIP(co.CP.Host) if err != nil { - exit.WithError("Error getting the host IP address to use from within the VM", err) + exit.Error(reason.IfHostIP, "Error getting the host IP address to use from within the VM", err) } } else { ip = net.ParseIP(mountIP) if ip == nil { - exit.WithCodeT(exit.Data, "error parsing the input ip address for mount") + exit.Message(reason.IfMountIP, "error parsing the input ip address for mount") } } port, err := getPort() if err != nil { - exit.WithError("Error finding port for mount", err) + exit.Error(reason.IfMountPort, "Error finding port for mount", err) } cfg := &cluster.MountConfig{ @@ -150,7 +154,7 @@ var mountCmd = &cobra.Command{ if driver.IsKIC(co.CP.Host.Driver.DriverName()) && runtime.GOOS != "linux" { bindIP = "127.0.0.1" } - out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) + out.T(style.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) out.Infof("Mount type: {{.name}}", out.V{"type": cfg.Type}) out.Infof("User ID: {{.userID}}", out.V{"userID": cfg.UID}) out.Infof("Group ID: {{.groupID}}", out.V{"groupID": cfg.GID}) @@ -164,9 +168,9 @@ var mountCmd = &cobra.Command{ if cfg.Type == nineP { wg.Add(1) go func() { - out.T(out.Fileserver, "Userspace file server: ") + out.T(style.Fileserver, "Userspace file server: ") ufs.StartServer(net.JoinHostPort(bindIP, strconv.Itoa(port)), debugVal, hostPath) - out.T(out.Stopped, "Userspace file server is shutdown") + out.T(style.Stopped, "Userspace file server is shutdown") wg.Done() }() } @@ -176,22 +180,22 @@ var mountCmd = &cobra.Command{ signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { for sig := range c { - out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath}) + out.T(style.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath}) err := 
cluster.Unmount(co.CP.Runner, vmPath) if err != nil { out.FailureT("Failed unmount: {{.error}}", out.V{"error": err}) } - exit.WithCodeT(exit.Interrupted, "Received {{.name}} signal", out.V{"name": sig}) + exit.Message(reason.Interrupted, "Received {{.name}} signal", out.V{"name": sig}) } }() err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg) if err != nil { - exit.WithError("mount failed", err) + exit.Error(reason.GuestMount, "mount failed", err) } - out.T(out.SuccessType, "Successfully mounted {{.sourcePath}} to {{.destinationPath}}", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) + out.T(style.Success, "Successfully mounted {{.sourcePath}} to {{.destinationPath}}", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) out.Ln("") - out.T(out.Notice, "NOTE: This process must stay alive for the mount to be accessible ...") + out.T(style.Notice, "NOTE: This process must stay alive for the mount to be accessible ...") wg.Wait() }, } @@ -203,7 +207,7 @@ func init() { mountCmd.Flags().BoolVar(&isKill, "kill", false, "Kill the mount process spawned by minikube start") mountCmd.Flags().StringVar(&uid, "uid", "docker", "Default user id used for the mount") mountCmd.Flags().StringVar(&gid, "gid", "docker", "Default group id used for the mount") - mountCmd.Flags().UintVar(&mode, "mode", 0755, "File permissions used for the mount") + mountCmd.Flags().UintVar(&mode, "mode", 0o755, "File permissions used for the mount") mountCmd.Flags().StringSliceVar(&options, "options", []string{}, "Additional mount options, such as cache=fscache") mountCmd.Flags().IntVar(&mSize, "msize", defaultMsize, "The number of bytes to use for 9p packet payload") } diff --git a/cmd/minikube/cmd/node.go b/cmd/minikube/cmd/node.go index f851788ac6..5b6fbb4c5b 100644 --- a/cmd/minikube/cmd/node.go +++ b/cmd/minikube/cmd/node.go @@ -19,6 +19,7 @@ package cmd import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/reason" ) // nodeCmd 
represents the set of node subcommands @@ -27,6 +28,6 @@ var nodeCmd = &cobra.Command{ Short: "Add, remove, or list additional nodes", Long: "Operations on nodes", Run: func(cmd *cobra.Command, args []string) { - exit.UsageT("Usage: minikube node [add|start|stop|delete|list]") + exit.Message(reason.Usage, "Usage: minikube node [add|start|stop|delete|list]") }, } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index ab7e422a83..c23a60aa83 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -25,12 +25,15 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var ( cp bool worker bool ) + var nodeAddCmd = &cobra.Command{ Use: "add", Short: "Adds a node to the given cluster.", @@ -45,7 +48,7 @@ var nodeAddCmd = &cobra.Command{ name := node.Name(len(cc.Nodes) + 1) - out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + out.T(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
n := config.Node{ @@ -66,15 +69,15 @@ var nodeAddCmd = &cobra.Command{ if err := node.Add(cc, n, false); err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, n, nil, err) if err != nil { - exit.WithError("failed to add node", err) + exit.Error(reason.GuestNodeAdd, "failed to add node", err) } } if err := config.SaveProfile(cc.Name, cc); err != nil { - exit.WithError("failed to save config", err) + exit.Error(reason.HostSaveProfile, "failed to save config", err) } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) + out.T(style.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) }, } diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index 634ac92b8c..b5412fb486 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -23,6 +23,8 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var nodeDeleteCmd = &cobra.Command{ @@ -30,18 +32,17 @@ var nodeDeleteCmd = &cobra.Command{ Short: "Deletes a node from a cluster.", Long: "Deletes a node from a cluster.", Run: func(cmd *cobra.Command, args []string) { - if len(args) == 0 { - exit.UsageT("Usage: minikube node delete [name]") + exit.Message(reason.Usage, "Usage: minikube node delete [name]") } name := args[0] co := mustload.Healthy(ClusterFlagValue()) - out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name}) + out.T(style.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name}) n, err := node.Delete(*co.Config, name) if err != nil { - exit.WithError("deleting node", err) + exit.Error(reason.GuestNodeDelete, "deleting node", err) } if driver.IsKIC(co.Config.Driver) { @@ -49,7 +50,7 @@ var 
nodeDeleteCmd = &cobra.Command{ deletePossibleKicLeftOver(machineName, co.Config.Driver) } - out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) + out.T(style.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) }, } diff --git a/cmd/minikube/cmd/node_list.go b/cmd/minikube/cmd/node_list.go index 7ec8075424..b78ae674c2 100644 --- a/cmd/minikube/cmd/node_list.go +++ b/cmd/minikube/cmd/node_list.go @@ -25,6 +25,7 @@ import ( "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/reason" ) var nodeListCmd = &cobra.Command{ @@ -33,7 +34,7 @@ var nodeListCmd = &cobra.Command{ Long: "List existing minikube nodes.", Run: func(cmd *cobra.Command, args []string) { if len(args) != 0 { - exit.UsageT("Usage: minikube node list") + exit.Message(reason.Usage, "Usage: minikube node list") } cname := ClusterFlagValue() diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index bfc8431218..758b77ade7 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -27,6 +27,8 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var nodeStartCmd = &cobra.Command{ @@ -35,7 +37,7 @@ var nodeStartCmd = &cobra.Command{ Long: "Starts an existing stopped node in a cluster.", Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { - exit.UsageT("Usage: minikube node start [name]") + exit.Message(reason.Usage, "Usage: minikube node start [name]") } api, cc := mustload.Partial(ClusterFlagValue()) @@ -43,18 +45,18 @@ var nodeStartCmd = &cobra.Command{ n, _, err := node.Retrieve(*cc, name) if err != nil { - exit.WithError("retrieving node", err) + exit.Error(reason.GuestNodeRetrieve, "retrieving node", err) } machineName := driver.MachineName(*cc, *n) 
if machine.IsRunning(api, machineName) { - out.T(out.Check, "{{.name}} is already running", out.V{"name": name}) + out.T(style.Check, "{{.name}} is already running", out.V{"name": name}) os.Exit(0) } r, p, m, h, err := node.Provision(cc, n, false, viper.GetBool(deleteOnFailure)) if err != nil { - exit.WithError("provisioning host for node", err) + exit.Error(reason.GuestNodeProvision, "provisioning host for node", err) } s := node.Starter{ @@ -71,11 +73,11 @@ var nodeStartCmd = &cobra.Command{ if err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err) if err != nil { - node.MaybeExitWithAdvice(err) - exit.WithError("failed to start node", err) + node.ExitIfFatal(err) + exit.Error(reason.GuestNodeStart, "failed to start node", err) } } - out.T(out.Happy, "Successfully started node {{.name}}!", out.V{"name": machineName}) + out.T(style.Happy, "Successfully started node {{.name}}!", out.V{"name": machineName}) }, } diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index 9108bc65b8..c93433e7c6 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -24,6 +24,8 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var nodeStopCmd = &cobra.Command{ @@ -32,7 +34,7 @@ var nodeStopCmd = &cobra.Command{ Long: "Stops a node in a cluster.", Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { - exit.UsageT("Usage: minikube node stop [name]") + exit.Message(reason.Usage, "Usage: minikube node stop [name]") } name := args[0] @@ -40,7 +42,7 @@ var nodeStopCmd = &cobra.Command{ n, _, err := node.Retrieve(*cc, name) if err != nil { - exit.WithError("retrieving node", err) + exit.Error(reason.GuestNodeRetrieve, "retrieving node", err) } machineName := driver.MachineName(*cc, *n) @@ -49,7 +51,7 @@ var nodeStopCmd = &cobra.Command{ if err != nil { 
out.FatalT("Failed to stop node {{.name}}", out.V{"name": name}) } - out.T(out.Stopped, "Successfully stopped node {{.name}}", out.V{"name": machineName}) + out.T(style.Stopped, "Successfully stopped node {{.name}}", out.V{"name": machineName}) }, } diff --git a/cmd/minikube/cmd/pause.go b/cmd/minikube/cmd/pause.go index 8f1db4d5e1..faa704425b 100644 --- a/cmd/minikube/cmd/pause.go +++ b/cmd/minikube/cmd/pause.go @@ -33,6 +33,8 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) var ( @@ -54,9 +56,9 @@ func runPause(cmd *cobra.Command, args []string) { glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings()) if allNamespaces { - namespaces = nil //all + namespaces = nil // all } else if len(namespaces) == 0 { - exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces") + exit.Message(reason.Usage, "Use -A to specify all namespaces") } ids := []string{} @@ -68,35 +70,35 @@ func runPause(cmd *cobra.Command, args []string) { name = co.Config.Name } - out.T(out.Pause, "Pausing node {{.name}} ... ", out.V{"name": name}) + out.T(style.Pause, "Pausing node {{.name}} ... 
", out.V{"name": name}) host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n)) if err != nil { - exit.WithError("Error getting host", err) + exit.Error(reason.GuestLoadHost, "Error getting host", err) } r, err := machine.CommandRunner(host) if err != nil { - exit.WithError("Failed to get command runner", err) + exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err) } cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { - exit.WithError("Failed runtime", err) + exit.Error(reason.InternalNewRuntime, "Failed runtime", err) } uids, err := cluster.Pause(cr, r, namespaces) if err != nil { - exit.WithError("Pause", err) + exit.Error(reason.GuestPause, "Pause", err) } ids = append(ids, uids...) } register.Reg.SetStep(register.Done) if namespaces == nil { - out.T(out.Unpause, "Paused {{.count}} containers", out.V{"count": len(ids)}) + out.T(style.Unpause, "Paused {{.count}} containers", out.V{"count": len(ids)}) } else { - out.T(out.Unpause, "Paused {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")}) + out.T(style.Unpause, "Paused {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")}) } } diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go index e20333e09e..1d4d93c6da 100644 --- a/cmd/minikube/cmd/podman-env.go +++ b/cmd/minikube/cmd/podman-env.go @@ -35,6 +35,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/shell" ) @@ -47,15 +48,13 @@ type PodmanShellConfig struct { MinikubePodmanProfile string } -var ( - podmanUnset bool -) +var podmanUnset bool // podmanShellCfgSet generates context variables for "podman-env" func podmanShellCfgSet(ec PodmanEnvConfig, envMap 
map[string]string) *PodmanShellConfig { profile := ec.profile const usgPlz = "To point your shell to minikube's podman service, run:" - var usgCmd = fmt.Sprintf("minikube -p %s podman-env", profile) + usgCmd := fmt.Sprintf("minikube -p %s podman-env", profile) s := &PodmanShellConfig{ Config: *shell.CfgSet(ec.EnvConfig, usgPlz, usgCmd), } @@ -114,7 +113,7 @@ var podmanEnvCmd = &cobra.Command{ if podmanUnset { if err := podmanUnsetScript(PodmanEnvConfig{EnvConfig: sh}, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) + exit.Error(reason.InternalEnvScript, "Error generating unset output", err) } return } @@ -124,20 +123,20 @@ var podmanEnvCmd = &cobra.Command{ driverName := co.CP.Host.DriverName if driverName == driver.None { - exit.UsageT(`'none' driver does not support 'minikube podman-env' command`) + exit.Message(reason.Usage, `'none' driver does not support 'minikube podman-env' command`) } if len(co.Config.Nodes) > 1 { - exit.WithCodeT(exit.BadUsage, `The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`) + exit.Message(reason.Usage, `The podman-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/`) } if ok := isPodmanAvailable(co.CP.Runner); !ok { - exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname}) + exit.Message(reason.EnvPodmanUnavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname}) } client, err := createExternalSSHClient(co.CP.Host.Driver) if err != nil { - exit.WithError("Error getting ssh client", err) + exit.Error(reason.IfSSHClient, "Error getting ssh client", err) } ec := PodmanEnvConfig{ @@ -150,12 +149,12 @@ var podmanEnvCmd = &cobra.Command{ if ec.Shell == "" { ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error detecting shell", err) + exit.Error(reason.InternalShellDetect, "Error detecting shell", err) } } if err := podmanSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) + exit.Error(reason.InternalEnvScript, "Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/root.go b/cmd/minikube/cmd/root.go index 0128b1a8fd..55168af2d1 100644 --- a/cmd/minikube/cmd/root.go +++ b/cmd/minikube/cmd/root.go @@ -20,6 +20,7 @@ import ( goflag "flag" "fmt" "os" + "path/filepath" "runtime" "strings" @@ -34,6 +35,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/translate" ) @@ -56,15 +58,15 @@ var RootCmd = &cobra.Command{ Long: `minikube provisions and manages local Kubernetes clusters optimized for development workflows.`, PersistentPreRun: func(cmd *cobra.Command, args []string) { for _, path := range dirs { - if err := os.MkdirAll(path, 0777); err != nil { - exit.WithError("Error creating minikube directory", err) + if err := os.MkdirAll(path, 0o777); err != nil { + exit.Error(reason.HostHomeMkdir, "Error creating minikube directory", err) } } 
logDir := pflag.Lookup("log_dir") if !logDir.Changed { if err := logDir.Value.Set(localpath.MakeMiniPath("logs")); err != nil { - exit.WithError("logdir set failed", err) + exit.Error(reason.InternalFlagSet, "logdir set failed", err) } } }, @@ -73,6 +75,11 @@ var RootCmd = &cobra.Command{ // Execute adds all child commands to the root command sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { + _, callingCmd := filepath.Split(os.Args[0]) + + if callingCmd == "kubectl" { + os.Args = append([]string{RootCmd.Use, callingCmd}, os.Args[1:]...) + } for _, c := range RootCmd.Commands() { c.Short = translate.T(c.Short) c.Long = translate.T(c.Long) @@ -105,7 +112,7 @@ func Execute() { if err := RootCmd.Execute(); err != nil { // Cobra already outputs the error, typically because the user provided an unknown command. - os.Exit(exit.BadUsage) + os.Exit(reason.ExProgramUsage) } } @@ -143,7 +150,7 @@ func usageTemplate() string { // by setting them directly, using values from viper when not passed in as args func setFlagsUsingViper() { for _, config := range []string{"alsologtostderr", "log_dir", "v"} { - var a = pflag.Lookup(config) + a := pflag.Lookup(config) viper.SetDefault(a.Name, a.DefValue) // If the flag is set, override viper value if a.Changed { @@ -152,7 +159,7 @@ func setFlagsUsingViper() { // Viper will give precedence first to calls to the Set command, // then to values from the config.yml if err := a.Value.Set(viper.GetString(a.Name)); err != nil { - exit.WithError(fmt.Sprintf("failed to set value for %q", a.Name), err) + exit.Error(reason.InternalFlagSet, fmt.Sprintf("failed to set value for %q", a.Name), err) } a.Changed = true } @@ -229,10 +236,9 @@ func init() { pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil { - exit.WithError("Unable to bind flags", err) + exit.Error(reason.InternalBindFlags, "Unable to bind flags", 
err) } cobra.OnInitialize(initConfig) - } // initConfig reads in config file and ENV variables if set. diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index b4ce0ba84d..f2f6eb0404 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -40,7 +40,9 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/service" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/tunnel/kic" ) @@ -64,7 +66,7 @@ var serviceCmd = &cobra.Command{ PersistentPreRun: func(cmd *cobra.Command, args []string) { t, err := template.New("serviceURL").Parse(serviceURLFormat) if err != nil { - exit.WithError("The value passed to --format is invalid", err) + exit.Error(reason.InternalFormatUsage, "The value passed to --format is invalid", err) } serviceURLTemplate = t @@ -72,7 +74,7 @@ var serviceCmd = &cobra.Command{ }, Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 || len(args) > 1 { - exit.UsageT("You must specify a service name") + exit.Message(reason.Usage, "You must specify a service name") } svc := args[0] @@ -84,10 +86,10 @@ var serviceCmd = &cobra.Command{ if err != nil { var s *service.SVCNotFoundError if errors.As(err, &s) { - exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace. + exit.Message(reason.SvcNotFound, `Service '{{.service}}' was not found in '{{.namespace}}' namespace. You may select another namespace by using 'minikube service {{.service}} -n '. 
Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace}) } - exit.WithError("Error opening service", err) + exit.Error(reason.SvcTimeout, "Error opening service", err) } if driver.NeedsPortForward(co.Config.Driver) { @@ -107,7 +109,6 @@ func init() { serviceCmd.Flags().IntVar(&interval, "interval", service.DefaultInterval, "The initial time interval for each check that wait performs in seconds") serviceCmd.PersistentFlags().StringVar(&serviceURLFormat, "format", defaultServiceFormatTemplate, "Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time.") - } func startKicServiceTunnel(svc, configName string) { @@ -116,12 +117,12 @@ func startKicServiceTunnel(svc, configName string) { clientset, err := kapi.Client(configName) if err != nil { - exit.WithError("error creating clientset", err) + exit.Error(reason.InternalKubernetesClient, "error creating clientset", err) } port, err := oci.ForwardedPort(oci.Docker, configName, 22) if err != nil { - exit.WithError("error getting ssh port", err) + exit.Error(reason.DrvPortForward, "error getting ssh port", err) } sshPort := strconv.Itoa(port) sshKey := filepath.Join(localpath.MiniPath(), "machines", configName, "id_rsa") @@ -129,7 +130,7 @@ func startKicServiceTunnel(svc, configName string) { serviceTunnel := kic.NewServiceTunnel(sshPort, sshKey, clientset.CoreV1()) urls, err := serviceTunnel.Start(svc, namespace) if err != nil { - exit.WithError("error starting tunnel", err) + exit.Error(reason.SvcTunnelStart, "error starting tunnel", err) } // wait for tunnel to come up @@ -145,7 +146,7 @@ func startKicServiceTunnel(svc, configName string) { err = serviceTunnel.Stop() if err != nil { - exit.WithError("error stopping tunnel", err) + exit.Error(reason.SvcTunnelStop, "error stopping tunnel", err) } } @@ -163,9 +164,9 @@ func openURLs(svc string, urls []string) { continue } - out.T(out.Celebrate, "Opening 
service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc}) + out.T(style.Celebrate, "Opening service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc}) if err := browser.OpenURL(u); err != nil { - exit.WithError(fmt.Sprintf("open url failed: %s", u), err) + exit.Error(reason.HostBrowser, fmt.Sprintf("open url failed: %s", u), err) } } } diff --git a/cmd/minikube/cmd/service_list.go b/cmd/minikube/cmd/service_list.go index c3837712be..97df14d156 100644 --- a/cmd/minikube/cmd/service_list.go +++ b/cmd/minikube/cmd/service_list.go @@ -24,10 +24,11 @@ import ( "github.com/spf13/cobra" core "k8s.io/api/core/v1" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/service" + "k8s.io/minikube/pkg/minikube/style" ) var serviceListNamespace string @@ -43,8 +44,8 @@ var serviceListCmd = &cobra.Command{ serviceURLs, err := service.GetServiceURLs(co.API, co.Config.Name, serviceListNamespace, serviceURLTemplate) if err != nil { out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err}) - out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.") - os.Exit(exit.Unavailable) + out.ErrT(style.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.") + os.Exit(reason.ExSvcUnavailable) } var data [][]string diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index d9ecb5f056..15f1d72336 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -28,11 +28,10 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" ) -var 
( - nativeSSHClient bool -) +var nativeSSHClient bool // sshCmd represents the docker-ssh command var sshCmd = &cobra.Command{ @@ -43,7 +42,7 @@ var sshCmd = &cobra.Command{ cname := ClusterFlagValue() co := mustload.Running(cname) if co.CP.Host.DriverName == driver.None { - exit.UsageT("'none' driver does not support 'minikube ssh' command") + exit.Message(reason.Usage, "'none' driver does not support 'minikube ssh' command") } var err error @@ -53,7 +52,7 @@ var sshCmd = &cobra.Command{ } else { n, _, err = node.Retrieve(*co.Config, nodeName) if err != nil { - exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) + exit.Message(reason.GuestNodeRetrieve, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) } } @@ -62,7 +61,7 @@ var sshCmd = &cobra.Command{ // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) // It'd be nice if we could pass up the correct error code here :( - os.Exit(exit.Failure) + os.Exit(1) } }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 5c5e4d0ebd..398cdb6c59 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -57,6 +57,8 @@ import ( "k8s.io/minikube/pkg/minikube/notify" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/translate" @@ -77,7 +79,7 @@ func init() { initDriverFlags() initNetworkingFlags() if err := viper.BindPFlags(startCmd.Flags()); err != nil { - exit.WithError("unable to bind flags", err) + exit.Error(reason.InternalBindFlags, "unable to bind flags", err) } } @@ -152,11 +154,11 @@ func runStart(cmd *cobra.Command, args []string) { if !config.ProfileNameValid(ClusterFlagValue()) { out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()}) - exit.UsageT("Only 
alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.") + exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.") } existing, err := config.Load(ClusterFlagValue()) if err != nil && !config.IsNotExist(err) { - exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err}) + exit.Message(reason.HostConfigLoad, "Unable to load config: {{.error}}", out.V{"error": err}) } if existing != nil { @@ -168,11 +170,11 @@ func runStart(cmd *cobra.Command, args []string) { ds, alts, specified := selectDriver(existing) starter, err := provisionWithDriver(cmd, ds, existing) if err != nil { - node.MaybeExitWithAdvice(err) + node.ExitIfFatal(err) machine.MaybeDisplayAdvice(err, ds.Name) if specified { // If the user specified a driver, don't fallback to anything else - exit.WithError("error provisioning host", err) + exit.Error(reason.GuestProvision, "error provisioning host", err) } else { success := false // Walk down the rest of the options @@ -199,7 +201,7 @@ func runStart(cmd *cobra.Command, args []string) { } } if !success { - exit.WithError("error provisioning host", err) + exit.Error(reason.GuestProvision, "error provisioning host", err) } } } @@ -211,20 +213,19 @@ func runStart(cmd *cobra.Command, args []string) { stopProfile(existing.Name) starter, err = provisionWithDriver(cmd, ds, existing) if err != nil { - exit.WithError("error provisioning host", err) + exit.Error(reason.GuestProvision, "error provisioning host", err) } } kubeconfig, err := startWithDriver(cmd, starter, existing) if err != nil { - node.MaybeExitWithAdvice(err) - exit.WithError("failed to start node", err) + node.ExitIfFatal(err) + exit.Error(reason.GuestStart, "failed to start node", err) } if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil { glog.Errorf("kubectl info: %v", err) } - } func provisionWithDriver(cmd 
*cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) { @@ -253,7 +254,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * // This is about as far as we can go without overwriting config files if viper.GetBool(dryRun) { - out.T(out.DryRun, `dry-run validation complete!`) + out.T(style.DryRun, `dry-run validation complete!`) os.Exit(0) } @@ -273,17 +274,17 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * } } - mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure)) - if err != nil { - return node.Starter{}, err - } - if viper.GetBool(nativeSSH) { ssh.SetDefaultClient(ssh.Native) } else { ssh.SetDefaultClient(ssh.External) } + mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure)) + if err != nil { + return node.Starter{}, err + } + return node.Starter{ Runner: mRunner, PreExists: preExists, @@ -314,7 +315,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } if numNodes > 1 { if driver.BareMetal(starter.Cfg.Driver) { - exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") + exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.") } else { // Only warn users on first start. if existing == nil { @@ -353,7 +354,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. 
func warnAboutMultiNode() { out.WarningT("Multi-node clusters are currently experimental and might exhibit unintended behavior.") - out.T(out.Documentation, "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538.") + out.T(style.Documentation, "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538.") } func updateDriver(driverName string) { @@ -372,7 +373,7 @@ func displayVersion(version string) { } register.Reg.SetStep(register.InitialSetup) - out.T(out.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) + out.T(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) } // displayEnviron makes the user aware of environment variables that will affect how minikube operates @@ -390,16 +391,16 @@ func displayEnviron(env []string) { func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error { register.Reg.SetStep(register.Done) if kcs.KeepContext { - out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName}) + out.T(style.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName}) } else { - out.T(out.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": machineName}) + out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": machineName}) } path, err := exec.LookPath("kubectl") if err != nil { - out.ErrT(out.Kubectl, "Kubectl not found in your path") - out.ErrT(out.Workaround, "You can use kubectl inside minikube. 
For more information, visit https://minikube.sigs.k8s.io/docs/handbook/kubectl/") - out.ErrT(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/") + out.ErrT(style.Kubectl, "Kubectl not found in your path") + out.ErrT(style.Workaround, "You can use kubectl inside minikube. For more information, visit https://minikube.sigs.k8s.io/docs/handbook/kubectl/") + out.ErrT(style.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/") return nil } @@ -421,7 +422,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st out.Ln("") out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.", out.V{"path": path, "client_version": client, "cluster_version": cluster}) - out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version", + out.ErrT(style.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version", out.V{"path": path, "client_version": client}) } return nil @@ -433,7 +434,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co // Start failed, delete the cluster and try again profile, err := config.LoadProfile(existing.Name) if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name}) + out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name}) } err = deleteProfile(profile) @@ -472,7 +473,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co return kubeconfig, nil } // Don't delete the cluster unless they ask - return nil, errors.Wrap(originalErr, "startup failed") + return nil, originalErr } func kubectlVersion(path string) (string, error) { @@ -508,7 +509,7 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis if existing != nil { old := 
hostDriver(existing) ds := driver.Status(old) - out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()}) + out.T(style.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()}) return ds, nil, true } @@ -526,9 +527,9 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis } ds := driver.Status(d) if ds.Name == "" { - exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) } - out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) + out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) return ds, nil, true } @@ -536,21 +537,20 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis if d := viper.GetString("vm-driver"); d != "" { ds := driver.Status(viper.GetString("vm-driver")) if ds.Name == "" { - exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) } - out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) + out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) return ds, nil, true } choices := driver.Choices(viper.GetBool("vm")) pick, alts, rejects := driver.Suggest(choices) if pick.Name == "" { - out.T(out.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:") + out.T(style.ThumbsDown, "Unable to pick a default driver. 
Here is what was considered, in preference order:") for _, r := range rejects { out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection}) } - out.T(out.Workaround, "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/") - os.Exit(exit.Unavailable) + exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/") } if len(alts) > 1 { @@ -558,9 +558,9 @@ func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []regis for _, a := range alts { altNames = append(altNames, a.String()) } - out.T(out.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")}) + out.T(style.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")}) } else { - out.T(out.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()}) + out.T(style.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()}) } return pick, alts, false } @@ -618,19 +618,18 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { return } - out.ErrT(out.Conflict, `The existing "{{.name}}" VM was created using the "{{.old}}" driver, and is incompatible with the "{{.new}}" driver.`, - out.V{"name": existing.Name, "new": requested, "old": old}) - - out.ErrT(out.Workaround, `To proceed, either: - -1) Delete the existing "{{.name}}" cluster using: '{{.delcommand}}' - -* or * - -2) Start the existing "{{.name}}" cluster using: '{{.command}} --driver={{.old}}' -`, out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "delcommand": mustload.ExampleCmd(existing.Name, "delete"), "old": old, "name": existing.Name}) - - exit.WithCodeT(exit.Config, "Exiting.") + exit.Advice( + reason.GuestDrvMismatch, 
+ `The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`, + "Delete the existing '{{.name}}' cluster using: '{{.delcommand}}', or start the existing '{{.name}}' cluster using: '{{.command}} --driver={{.old}}'", + out.V{ + "name": existing.Name, + "new": requested, + "old": old, + "command": mustload.ExampleCmd(existing.Name, "start"), + "delcommand": mustload.ExampleCmd(existing.Name, "delete"), + }, + ) } // validateDriver validates that the selected driver appears sane, exits if not @@ -638,7 +637,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name glog.Infof("validating driver %q against %+v", name, existing) if !driver.Supported(name) { - exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) + exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) } // if we are only downloading artifacts for a driver, we can stop validation here @@ -649,33 +648,43 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { st := ds.State glog.Infof("status for %s: %+v", name, st) - if st.NeedsImprovement { // warn but don't exit - out.ErrLn("") - out.WarningT("'{{.driver}}' driver reported a issue that could affect the performance.", out.V{"driver": name}) - out.ErrT(out.Tip, "Suggestion: {{.fix}}", out.V{"fix": translate.T(st.Fix)}) - out.ErrLn("") + if st.NeedsImprovement { + out.WarnReason(reason.Kind{ + ID: fmt.Sprintf("PROVIDER_%s_IMPROVEMENT", strings.ToUpper(name)), + Advice: translate.T(st.Fix), + Style: style.Improvement, + }, `The '{{.driver}}' driver reported a performance issue`, out.V{"driver": name}) } - if st.Error != nil { - out.ErrLn("") - - out.WarningT("'{{.driver}}' driver reported an issue: {{.error}}", out.V{"driver": name, "error": st.Error}) - 
out.ErrT(out.Tip, "Suggestion: {{.fix}}", out.V{"fix": translate.T(st.Fix)}) - if st.Doc != "" { - out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": st.Doc}) - } - out.ErrLn("") - - if !st.Installed { - if existing != nil { - if old := hostDriver(existing); name == old { - exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) - } - } - exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name}) - } - exitIfNotForced(exit.Unavailable, "Failed to validate '{{.driver}}' driver", out.V{"driver": name}) + if st.Error == nil { + return } + + if !st.Installed { + exit.Message(reason.Kind{ + ID: fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)), + Advice: translate.T(st.Fix), + ExitCode: reason.ExProviderNotFound, + URL: st.Doc, + Style: style.Shrug, + }, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error}) + } + + id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name)) + code := reason.ExProviderUnavailable + + if !st.Running { + id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name)) + code = reason.ExProviderNotRunning + } + + exitIfNotForced(reason.Kind{ + ID: id, + Advice: translate.T(st.Fix), + ExitCode: code, + URL: st.Doc, + Style: style.Fatal, + }, st.Error.Error()) } func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) { @@ -740,31 +749,30 @@ func validateUser(drvName string) { useForce := viper.GetBool(force) if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce { - exit.WithCodeT(exit.Permissions, `The "{{.driver_name}}" driver requires root privileges. 
Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName}) + exit.Message(reason.DrvNeedsRoot, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName}) } + // If root is required, or we are not root, exit early if driver.NeedsRoot(drvName) || u.Uid != "0" { return } - out.ErrT(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName}) - out.ErrT(out.Tip, "If you are running minikube within a VM, consider using --driver=none:") - out.ErrT(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName}) + out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:") + out.ErrT(style.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - if !useForce { - os.Exit(exit.Permissions) - } cname := ClusterFlagValue() _, err = config.Load(cname) if err == nil || !config.IsNotExist(err) { - out.ErrT(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")}) + out.ErrT(style.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")}) } + if !useForce { - exit.WithCodeT(exit.Permissions, "Exiting") + exit.Message(reason.DrvAsRoot, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName}) } } -// memoryLimits returns the amount of memory allocated to the system and hypervisor , the return value is in MB +// memoryLimits returns the amount of memory allocated to the system and hypervisor, the return value is in MiB func memoryLimits(drvName string) (int, int, error) { info, cpuErr, memErr, 
diskErr := machine.CachedHostInfo() if cpuErr != nil { @@ -792,7 +800,7 @@ func memoryLimits(drvName string) (int, int, error) { return sysLimit, containerLimit, nil } -// suggestMemoryAllocation calculates the default memory footprint in MB +// suggestMemoryAllocation calculates the default memory footprint in MiB func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int { if mem := viper.GetInt(memory); mem != 0 { return mem @@ -830,76 +838,60 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int { return suggested } -// validateMemoryHardLimit checks if the user system has enough memory at all ! -func validateMemoryHardLimit(drvName string) { - s, c, err := memoryLimits(drvName) - if err != nil { - glog.Warningf("Unable to query memory limits: %v", err) - out.WarningT("Failed to verify system memory limits.") - return - } - if s < 2200 { - out.WarningT("Your system has only {{.memory_amount}}MB memory. This might not work minimum required is 2000MB.", out.V{"memory_amount": s}) - return - } - if driver.IsDockerDesktop(drvName) { - // in Docker Desktop if you allocate 2 GB the docker info shows: Total Memory: 1.945GiB which becomes 1991 when we calculate the MBs - // thats why it is not same number as other drivers which is 2 GB - if c < 1991 { - out.WarningT(`Increase Docker for Desktop memory to at least 2.5GB or more: - - Docker for Desktop > Settings > Resources > Memory - -`) - } - } -} - -// validateMemorySize validates the memory size matches the minimum recommended -func validateMemorySize(req int, drvName string) { +// validateRequestedMemorySize validates the memory size matches the minimum recommended +func validateRequestedMemorySize(req int, drvName string) { + // TODO: Fix MB vs MiB confusion sysLimit, containerLimit, err := memoryLimits(drvName) if err != nil { glog.Warningf("Unable to query memory limits: %v", err) } - // maximm percent of their ram they could allocate to minikube to prevent #8708 - 
maxAdvised := 0.79 * float64(sysLimit) - // a more sane alternative to their high memory 80% - minAdvised := 0.50 * float64(sysLimit) + // Detect if their system doesn't have enough memory to work with. + if driver.IsKIC(drvName) && containerLimit < minUsableMem { + if driver.IsDockerDesktop(drvName) { + if runtime.GOOS == "darwin" { + exitIfNotForced(reason.RsrcInsufficientDarwinDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"}) + } else { + exitIfNotForced(reason.RsrcInsufficientWindowsDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"}) + } + } + exitIfNotForced(reason.RsrcInsufficientContainerMemory, "{{.driver}} only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem}) + } + + if sysLimit < minUsableMem { + exitIfNotForced(reason.RsrcInsufficientSysMemory, "System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem}) + } if req < minUsableMem { - exitIfNotForced(exit.Config, "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem}) + exitIfNotForced(reason.RsrcInsufficientReqMemory, "Requested memory allocation {{.requested}}MiB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem}) } if req < minRecommendedMem { - out.WarningT("Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.", - out.V{"requested": req, "recommended": minRecommendedMem}) + out.WarnReason(reason.RsrcInsufficientReqMemory, "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommend}}MB. Deployments may fail.", out.V{"requested": req, "recommend": minRecommendedMem}) } if driver.IsDockerDesktop(drvName) && containerLimit < 2997 && sysLimit > 8000 { // for users with more than 8 GB advice 3 GB - out.WarningT(`Your system has {{.system_limit}}MB memory but Docker has only {{.container_limit}}MB. For a better performance increase to at least 3GB. - - Docker for Desktop > Settings > Resources > Memory - -`, out.V{"container_limit": containerLimit, "system_limit": sysLimit}) + r := reason.RsrcInsufficientDarwinDockerMemory + if runtime.GOOS == "Windows" { + r = reason.RsrcInsufficientWindowsDockerMemory + } + r.Style = style.Improvement + out.WarnReason(r, "Docker Desktop has access to only {{.size}}MiB of the {{.sys}}MiB in available system memory. Consider increasing this for improved performance.", out.V{"size": containerLimit, "sys": sysLimit, "recommend": "3 GB"}) } + advised := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes)) if req > sysLimit { - message := `Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB. 
Try specifying a lower memory: - - minikube start --memory={{.min_advised}}mb - -` - exitIfNotForced(exit.Config, message, out.V{"requested": req, "system_limit": sysLimit, "max_advised": int32(maxAdvised), "min_advised": minAdvised}) + exitIfNotForced(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"}, + `Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB.`, + out.V{"requested": req, "system_limit": sysLimit, "advised": advised}) } - if float64(req) > maxAdvised { - out.WarningT(`You are allocating {{.requested}}MB to memory and your system only has {{.system_limit}}MB. You might face issues. try specifying a lower memory: - - minikube start --memory={{.min_advised}}mb - -`, out.V{"requested": req, "system_limit": sysLimit, "min_advised": minAdvised}) + // Recommend 1GB to handle OS/VM overhead + maxAdvised := sysLimit - 1024 + if req > maxAdvised { + out.WarnReason(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"}, + `The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). 
You may face stability issues.`, + out.V{"requested": req, "system_limit": sysLimit, "advised": advised}) } - } // validateCPUCount validates the cpu count matches the minimum recommended @@ -916,45 +908,49 @@ func validateCPUCount(drvName string) { } else { cpuCount = viper.GetInt(cpus) } + if cpuCount < minimumCPUS { - exitIfNotForced(exit.BadUsage, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS}) + exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS}) } - if driver.IsKIC((drvName)) { - si, err := oci.CachedDaemonInfo(drvName) - if err != nil { - out.T(out.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName}) - si, err = oci.DaemonInfo(drvName) - if err != nil { - exit.UsageT("Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)}) - } + if !driver.IsKIC((drvName)) { + return + } + si, err := oci.CachedDaemonInfo(drvName) + if err != nil { + out.T(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName}) + si, err = oci.DaemonInfo(drvName) + if err != nil { + exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)}) } - if si.CPUs < 2 { - if drvName == oci.Docker { - out.T(out.Conflict, `Your Docker Desktop has less than 2 CPUs. Increase CPUs for Docker Desktop. - - Docker icon > Settings > Resources > CPUs - - `) - } - out.T(out.Documentation, "https://docs.docker.com/config/containers/resource_constraints/") - exitIfNotForced(exit.BadUsage, "Ensure your {{.driver_name}} system has enough CPUs. 
The minimum allowed is 2 CPUs.", out.V{"driver_name": driver.FullName(viper.GetString("driver"))}) - } + + } + + // looks good + if si.CPUs >= 2 { + return + } + + if drvName == oci.Docker && runtime.GOOS == "darwin" { + exitIfNotForced(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available") + } else if drvName == oci.Docker && runtime.GOOS == "windows" { + exitIfNotForced(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available") + } else { + exitIfNotForced(reason.RsrcInsufficientCores, "{{.driver_name}} has less than 2 CPUs available, but Kubernetes requires at least 2 to be available", out.V{"driver_name": driver.FullName(viper.GetString("driver"))}) } } // validateFlags validates the supplied flags against known bad combinations func validateFlags(cmd *cobra.Command, drvName string) { - if cmd.Flags().Changed(humanReadableDiskSize) { diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)) if err != nil { - exitIfNotForced(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) + exitIfNotForced(reason.Usage, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) } if diskSizeMB < minimumDiskSize { - exitIfNotForced(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize}) + exitIfNotForced(reason.RsrcInsufficientStorage, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize}) } } @@ -963,8 +959,8 @@ func validateFlags(cmd *cobra.Command, drvName string) { out.WarningT("The 
'{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName}) } } + validateCPUCount(drvName) - validateMemoryHardLimit(drvName) if cmd.Flags().Changed(memory) { if !driver.HasResourceLimits(drvName) { @@ -972,9 +968,9 @@ func validateFlags(cmd *cobra.Command, drvName string) { } req, err := util.CalculateSizeInMB(viper.GetString(memory)) if err != nil { - exitIfNotForced(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) + exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) } - validateMemorySize(req, drvName) + validateRequestedMemorySize(req, drvName) } if cmd.Flags().Changed(containerRuntime) { @@ -997,13 +993,13 @@ func validateFlags(cmd *cobra.Command, drvName string) { } if !validRuntime { - exit.UsageT(`Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")}) + exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". 
Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")}) } } if driver.BareMetal(drvName) { if ClusterFlagValue() != constants.DefaultClusterName { - exit.WithCodeT(exit.Config, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName}) + exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName}) } runtime := viper.GetString(containerRuntime) @@ -1015,20 +1011,19 @@ func validateFlags(cmd *cobra.Command, drvName string) { version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil)) if version.GTE(semver.MustParse("1.18.0-beta.1")) { if _, err := exec.LookPath("conntrack"); err != nil { - exit.WithCodeT(exit.Config, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) + exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) } } } // validate kubeadm extra args if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 { - out.ErrT( - out.Warning, + out.WarningT( "These --extra-config parameters are invalid: {{.invalid_extra_opts}}", out.V{"invalid_extra_opts": invalidOpts}, ) - exit.WithCodeT( - exit.Config, + exit.Message( + reason.Usage, "Valid components are: {{.valid_extra_opts}}", out.V{"valid_extra_opts": bsutil.KubeadmExtraConfigOpts}, ) @@ -1038,12 +1033,12 @@ func validateFlags(cmd *cobra.Command, drvName string) { for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmConfigParam], 
param) { - exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) + exit.Message(reason.Usage, "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) } } if s := viper.GetString(startOutput); s != "text" && s != "json" { - exit.UsageT("Sorry, please set the --output flag to one of the following valid options: [text,json]") + exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]") } validateRegistryMirror() @@ -1052,7 +1047,6 @@ func validateFlags(cmd *cobra.Command, drvName string) { // This function validates if the --registry-mirror // args match the format of http://localhost func validateRegistryMirror() { - if len(registryMirror) > 0 { for _, loc := range registryMirror { URL, err := url.Parse(loc) @@ -1060,7 +1054,7 @@ func validateRegistryMirror() { glog.Errorln("Error Parsing URL: ", err) } if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" { - exit.UsageT("Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc}) + exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc}) } } @@ -1141,11 +1135,11 @@ func validateKubernetesVersion(old *config.ClusterConfig) { oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix)) if err != nil { - exit.WithCodeT(exit.Data, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err}) + exit.Message(reason.InternalSemverParse, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err}) } defaultVersion, err := semver.Make(strings.TrimPrefix(constants.DefaultKubernetesVersion, version.VersionPrefix)) if err != nil { - exit.WithCodeT(exit.Data, "Unable to parse 
default Kubernetes version from constants: {{.error}}", out.V{"error": err}) + exit.Message(reason.InternalSemverParse, "Unable to parse default Kubernetes version from constants: {{.error}}", out.V{"error": err}) } if nvs.LT(oldestVersion) { @@ -1153,7 +1147,7 @@ func validateKubernetesVersion(old *config.ClusterConfig) { if !viper.GetBool(force) { out.WarningT("You can force an unsupported Kubernetes version via the --force flag") } - exitIfNotForced(exit.Data, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs}) + exitIfNotForced(reason.KubernetesTooOld, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs}) } if old == nil || old.KubernetesConfig.KubernetesVersion == "" { @@ -1172,26 +1166,12 @@ func validateKubernetesVersion(old *config.ClusterConfig) { } suggestedName := old.Name + "2" - out.T(out.Conflict, "You have selected Kubernetes {{.new}}, but the existing cluster is running Kubernetes {{.old}}", out.V{"new": nvs, "old": ovs, "profile": profileArg}) - exit.WithCodeT(exit.Config, `Non-destructive downgrades are not supported, but you can proceed with one of the following options: - - 1) Recreate the cluster with Kubernetes {{.new}}, by running: - - minikube delete{{.profile}} - minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}} - - 2) Create a second cluster with Kubernetes {{.new}}, by running: - - minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}} - - 3) Use the existing cluster at version Kubernetes {{.old}}, by running: - - minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}} - `, out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName}) + exit.Message(reason.KubernetesDowngrade, "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}", + out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, 
"suggestedName": suggestedName}) } if defaultVersion.GT(nvs) { - out.T(out.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion}) + out.T(style.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion}) } } @@ -1211,7 +1191,7 @@ func getKubernetesVersion(old *config.ClusterConfig) string { nvs, err := semver.Make(strings.TrimPrefix(paramVersion, version.VersionPrefix)) if err != nil { - exit.WithCodeT(exit.Data, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": paramVersion, "error": err}) + exit.Message(reason.Usage, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": paramVersion, "error": err}) } return version.VersionPrefix + nvs.String() @@ -1224,7 +1204,7 @@ func validateDockerStorageDriver(drvName string) { return } if _, err := exec.LookPath(drvName); err != nil { - exit.WithCodeT(exit.BadUsage, "Please make sure {{.DriverName}} is available on your PATH", out.V{"DriverName": drvName}) + exit.Error(reason.DrvNotFound, fmt.Sprintf("%s not found on PATH", drvName), err) } si, err := oci.DaemonInfo(drvName) if err != nil { @@ -1239,9 +1219,9 @@ func validateDockerStorageDriver(drvName string) { viper.Set(preload, false) } -func exitIfNotForced(code int, message string, v out.V) { +func exitIfNotForced(r reason.Kind, message string, v ...out.V) { if !viper.GetBool(force) { - exit.WithCodeT(code, message, v) + exit.Message(r, message, v...) } - out.WarningT(message, v) + out.Error(r, message, v...) 
} diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index d954bf89c9..a17e6084bc 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -38,6 +38,8 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" pkgutil "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/version" ) @@ -92,8 +94,8 @@ const ( interactive = "interactive" waitTimeout = "wait-timeout" nativeSSH = "native-ssh" - minUsableMem = 1024 // Kubernetes will not start with less than 1GB - minRecommendedMem = 2000 // Warn at no lower than existing configurations + minUsableMem = 1024 // In MiB: Kubernetes will not start with less than 1GiB + minRecommendedMem = 2000 // In MiB: Warn at no lower than existing configurations minimumCPUS = 2 minimumDiskSize = 2000 autoUpdate = "auto-update-drivers" @@ -144,7 +146,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.") startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. Currently available for docker and crio. Defaults to false.") - startCmd.Flags().String(startOutput, "text", "Format to print stdout in. Options include: [text,json]") + startCmd.Flags().StringP(startOutput, "o", "text", "Format to print stdout in. 
Options include: [text,json]") } // initKubernetesFlags inits the commandline flags for Kubernetes related options @@ -230,19 +232,19 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k var err error mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory)) if err != nil { - exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) + exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) } if driver.IsKIC(drvName) && mem > containerLimit { - exit.UsageT("{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)}) + exit.Message(reason.Usage, "{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)}) } } else { - validateMemorySize(mem, drvName) + validateRequestedMemorySize(mem, drvName) glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit) } diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)) if err != nil { - exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) + exit.Message(reason.Usage, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) } repository := viper.GetString(imageRepository) @@ -250,12 +252,12 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") { 
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))) if err != nil { - exit.WithError("Failed to check main repository and mirrors for images", err) + exit.Error(reason.InetRepo, "Failed to check main repository and mirrors for images", err) } if !found { if autoSelectedRepository == "" { - exit.WithCodeT(exit.Failure, "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag") + exit.Message(reason.InetReposUnavailable, "None of the known repositories are accessible. Consider specifying an alternative image repository with --image-repository flag") } else { out.WarningT("None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository}) } @@ -265,7 +267,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k } if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) { - out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository}) + out.T(style.Success, "Using image repository {{.name}}", out.V{"name": repository}) } // Backwards compatibility with --enable-default-cni @@ -329,7 +331,9 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k }, } cc.VerifyComponents = interpretWaitFlag(*cmd) - + if viper.GetBool(createMount) && driver.IsKIC(drvName) { + cc.ContainerVolumeMounts = []string{viper.GetString(mountString)} + } cnm, err := cni.New(cc) if err != nil { return cc, config.Node{}, errors.Wrap(err, "cni") @@ -428,7 +432,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC } // validate the memory size in case user changed their system memory limits (example change docker desktop or upgraded memory.) 
- validateMemorySize(cc.Memory, cc.Driver) + validateRequestedMemorySize(cc.Memory, cc.Driver) if cc.CPUs == 0 { glog.Info("Existing config file was missing cpu. (could be an old minikube config), will use the default value") diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index c4de4264a6..df7184c0c1 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -45,12 +45,15 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/version" ) -var statusFormat string -var output string -var layout string +var ( + statusFormat string + output string + layout string +) const ( // Additional legacy states: @@ -182,9 +185,8 @@ var statusCmd = &cobra.Command{ Exit status contains the status of minikube's VM, cluster and Kubernetes encoded on it's bits in this order from right to left. Eg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for Kubernetes NOK)`, Run: func(cmd *cobra.Command, args []string) { - if output != "text" && statusFormat != defaultStatusFormat { - exit.UsageT("Cannot use both --output and --format options") + exit.Message(reason.Usage, "Cannot use both --output and --format options") } cname := ClusterFlagValue() @@ -195,7 +197,7 @@ var statusCmd = &cobra.Command{ if nodeName != "" || statusFormat != defaultStatusFormat && len(cc.Nodes) > 1 { n, _, err := node.Retrieve(*cc, nodeName) if err != nil { - exit.WithError("retrieving node", err) + exit.Error(reason.GuestNodeRetrieve, "retrieving node", err) } st, err := nodeStatus(api, *cc, *n) @@ -224,22 +226,22 @@ var statusCmd = &cobra.Command{ case "text": for _, st := range statuses { if err := statusText(st, os.Stdout); err != nil { - exit.WithError("status text failure", err) + exit.Error(reason.InternalStatusText, "status text failure", err) } } case "json": // Layout is currently only supported for JSON mode if layout 
== "cluster" { if err := clusterStatusJSON(statuses, os.Stdout); err != nil { - exit.WithError("status json failure", err) + exit.Error(reason.InternalStatusJSON, "status json failure", err) } } else { if err := statusJSON(statuses, os.Stdout); err != nil { - exit.WithError("status json failure", err) + exit.Error(reason.InternalStatusJSON, "status json failure", err) } } default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) + exit.Message(reason.Usage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } os.Exit(exitCode(statuses)) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 93a01eaeff..72dbce79a0 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -34,11 +34,15 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/retry" ) -var stopAll bool -var keepActive bool +var ( + stopAll bool + keepActive bool +) // stopCmd represents the stop command var stopCmd = &cobra.Command{ @@ -50,12 +54,11 @@ itself, leaving all files intact. The cluster can be started again with the "sta } func init() { - stopCmd.Flags().BoolVar(&stopAll, "all", false, "Set flag to stop all profiles (clusters)") stopCmd.Flags().BoolVar(&keepActive, "keep-context-active", false, "keep the kube-context active after cluster is stopped. 
Defaults to false.") if err := viper.GetViper().BindPFlags(stopCmd.Flags()); err != nil { - exit.WithError("unable to bind flags", err) + exit.Error(reason.InternalFlagsBind, "unable to bind flags", err) } RootCmd.AddCommand(stopCmd) @@ -88,7 +91,7 @@ func runStop(cmd *cobra.Command, args []string) { register.Reg.SetStep(register.Done) if stoppedNodes > 0 { - out.T(out.Stopped, `{{.count}} nodes stopped.`, out.V{"count": stoppedNodes}) + out.T(style.Stopped, `{{.count}} nodes stopped.`, out.V{"count": stoppedNodes}) } } @@ -115,7 +118,7 @@ func stopProfile(profile string) int { if !keepActive { if err := kubeconfig.UnsetCurrentContext(profile, kubeconfig.PathFromEnv()); err != nil { - exit.WithError("update config", err) + exit.Error(reason.HostKubeconfigUnset, "update config", err) } } @@ -134,7 +137,7 @@ func stop(api libmachine.API, machineName string) bool { switch err := errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: - out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) + out.T(style.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) nonexistent = true return nil default: @@ -143,7 +146,7 @@ func stop(api libmachine.API, machineName string) bool { } if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil { - exit.WithError("Unable to stop VM", err) + exit.Error(reason.GuestStopTimeout, "Unable to stop VM", err) } return nonexistent diff --git a/cmd/minikube/cmd/tunnel.go b/cmd/minikube/cmd/tunnel.go index 16fb63d5cb..393d2dc8db 100644 --- a/cmd/minikube/cmd/tunnel.go +++ b/cmd/minikube/cmd/tunnel.go @@ -33,6 +33,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/tunnel" "k8s.io/minikube/pkg/minikube/tunnel/kic" ) @@ -65,7 +66,7 @@ var tunnelCmd = &cobra.Command{ // doesn't hang on the 
API server call during startup and shutdown time or if there is a temporary error. clientset, err := kapi.Client(cname) if err != nil { - exit.WithError("error creating clientset", err) + exit.Error(reason.InternalKubernetesClient, "error creating clientset", err) } ctrlC := make(chan os.Signal, 1) @@ -80,7 +81,7 @@ var tunnelCmd = &cobra.Command{ port, err := oci.ForwardedPort(oci.Docker, cname, 22) if err != nil { - exit.WithError("error getting ssh port", err) + exit.Error(reason.DrvPortForward, "error getting ssh port", err) } sshPort := strconv.Itoa(port) sshKey := filepath.Join(localpath.MiniPath(), "machines", cname, "id_rsa") @@ -88,7 +89,7 @@ var tunnelCmd = &cobra.Command{ kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1()) err = kicSSHTunnel.Start() if err != nil { - exit.WithError("error starting tunnel", err) + exit.Error(reason.SvcTunnelStart, "error starting tunnel", err) } return @@ -96,7 +97,7 @@ var tunnelCmd = &cobra.Command{ done, err := manager.StartTunnel(ctx, cname, co.API, config.DefaultLoader, clientset.CoreV1()) if err != nil { - exit.WithError("error starting tunnel", err) + exit.Error(reason.SvcTunnelStart, "error starting tunnel", err) } <-done }, diff --git a/cmd/minikube/cmd/unpause.go b/cmd/minikube/cmd/unpause.go index 1d01a511e4..5dd7f2b5ba 100644 --- a/cmd/minikube/cmd/unpause.go +++ b/cmd/minikube/cmd/unpause.go @@ -33,6 +33,8 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) // unpauseCmd represents the docker-pause command @@ -48,10 +50,10 @@ var unpauseCmd = &cobra.Command{ glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings()) if allNamespaces { - namespaces = nil //all + namespaces = nil // all } else { if len(namespaces) == 0 { - exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces") + exit.Message(reason.Usage, 
"Use -A to specify all namespaces") } } @@ -66,27 +68,27 @@ var unpauseCmd = &cobra.Command{ name = co.Config.Name } - out.T(out.Pause, "Unpausing node {{.name}} ... ", out.V{"name": name}) + out.T(style.Pause, "Unpausing node {{.name}} ... ", out.V{"name": name}) machineName := driver.MachineName(*co.Config, n) host, err := machine.LoadHost(co.API, machineName) if err != nil { - exit.WithError("Error getting host", err) + exit.Error(reason.GuestLoadHost, "Error getting host", err) } r, err := machine.CommandRunner(host) if err != nil { - exit.WithError("Failed to get command runner", err) + exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err) } cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { - exit.WithError("Failed runtime", err) + exit.Error(reason.InternalNewRuntime, "Failed runtime", err) } uids, err := cluster.Unpause(cr, r, namespaces) if err != nil { - exit.WithError("Pause", err) + exit.Error(reason.GuestUnpause, "Pause", err) } ids = append(ids, uids...) 
} @@ -94,9 +96,9 @@ var unpauseCmd = &cobra.Command{ register.Reg.SetStep(register.Done) if namespaces == nil { - out.T(out.Pause, "Unpaused {{.count}} containers", out.V{"count": len(ids)}) + out.T(style.Pause, "Unpaused {{.count}} containers", out.V{"count": len(ids)}) } else { - out.T(out.Pause, "Unpaused {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")}) + out.T(style.Pause, "Unpaused {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")}) } }, } diff --git a/cmd/minikube/cmd/update-check.go b/cmd/minikube/cmd/update-check.go index ec519cae35..2afe16a136 100644 --- a/cmd/minikube/cmd/update-check.go +++ b/cmd/minikube/cmd/update-check.go @@ -21,6 +21,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/notify" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/version" ) @@ -32,11 +33,11 @@ var updateCheckCmd = &cobra.Command{ url := notify.GithubMinikubeReleasesURL r, err := notify.GetAllVersionsFromURL(url) if err != nil { - exit.WithError("Unable to fetch latest version info", err) + exit.Error(reason.InetVersionUnavailable, "Unable to fetch latest version info", err) } if len(r) < 1 { - exit.WithCodeT(exit.Data, "Update server returned an empty list") + exit.Message(reason.InetVersionEmpty, "Update server returned an empty list") } out.Ln("CurrentVersion: %s", version.GetVersion()) diff --git a/cmd/minikube/cmd/update-context.go b/cmd/minikube/cmd/update-context.go index 1063570638..70f3f2e079 100644 --- a/cmd/minikube/cmd/update-context.go +++ b/cmd/minikube/cmd/update-context.go @@ -22,6 +22,8 @@ import ( "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) // updateContextCmd represents the update-context command @@ 
-36,13 +38,12 @@ var updateContextCmd = &cobra.Command{ updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv()) if err != nil { - exit.WithError("update config", err) + exit.Error(reason.HostKubeconfigUpdate, "update config", err) } if updated { - out.T(out.Celebrate, `"{{.context}}" context has been updated to point to {{.hostname}}:{{.port}}`, out.V{"context": cname, "hostname": co.CP.Hostname, "port": co.CP.Port}) + out.T(style.Celebrate, `"{{.context}}" context has been updated to point to {{.hostname}}:{{.port}}`, out.V{"context": cname, "hostname": co.CP.Hostname, "port": co.CP.Port}) } else { - out.T(out.Meh, `No changes required for the "{{.context}}" context`, out.V{"context": cname}) + out.T(style.Meh, `No changes required for the "{{.context}}" context`, out.V{"context": cname}) } - }, } diff --git a/cmd/minikube/cmd/version.go b/cmd/minikube/cmd/version.go index 478a7aab92..d76e511e86 100644 --- a/cmd/minikube/cmd/version.go +++ b/cmd/minikube/cmd/version.go @@ -23,6 +23,7 @@ import ( "gopkg.in/yaml.v2" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/version" ) @@ -51,17 +52,17 @@ var versionCmd = &cobra.Command{ case "json": json, err := json.Marshal(data) if err != nil { - exit.WithError("version json failure", err) + exit.Error(reason.InternalJSONMarshal, "version json failure", err) } out.Ln(string(json)) case "yaml": yaml, err := yaml.Marshal(data) if err != nil { - exit.WithError("version yaml failure", err) + exit.Error(reason.InternalYamlMarshal, "version yaml failure", err) } out.Ln(string(yaml)) default: - exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'") + exit.Message(reason.InternalOutputUsage, "error: --output must be 'yaml' or 'json'") } }, } diff --git a/deploy/addons/dashboard/dashboard-dp.yaml b/deploy/addons/dashboard/dashboard-dp.yaml index aec28edf63..157db6f22f 100644 --- 
a/deploy/addons/dashboard/dashboard-dp.yaml +++ b/deploy/addons/dashboard/dashboard-dp.yaml @@ -90,7 +90,7 @@ spec: containers: - name: kubernetes-dashboard # WARNING: This must match pkg/minikube/bootstrapper/images/images.go - image: kubernetesui/dashboard:v2.0.1 + image: kubernetesui/dashboard:v2.0.3 ports: - containerPort: 9090 protocol: TCP diff --git a/deploy/addons/ingress/ingress-dp.yaml.tmpl b/deploy/addons/ingress/ingress-dp.yaml.tmpl index b8c7742a35..fc5916a1fd 100644 --- a/deploy/addons/ingress/ingress-dp.yaml.tmpl +++ b/deploy/addons/ingress/ingress-dp.yaml.tmpl @@ -48,7 +48,7 @@ spec: serviceAccountName: ingress-nginx containers: - name: controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 + image: us.gcr.io/k8s-artifacts-prod/ingress-nginx/controller:v0.34.1@sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 imagePullPolicy: IfNotPresent lifecycle: preStop: diff --git a/deploy/iso/minikube-iso/package/falco-module/falco-module.hash b/deploy/iso/minikube-iso/package/falco-module/falco-module.hash index b7d39246ac..6944f25b5a 100644 --- a/deploy/iso/minikube-iso/package/falco-module/falco-module.hash +++ b/deploy/iso/minikube-iso/package/falco-module/falco-module.hash @@ -4,7 +4,9 @@ sha256 b873e3590e56ead740ed905108221f98da6100da3c5b7acf2355ea1cf628d931 0.20.0.t sha256 b1c9884855d58be94a97b2e348bcdc7db995800f0405b0f4e9a7176ee2f094a7 0.21.0.tar.gz sha256 11890b1401c197c28ee0a70a364004f58f5ec5526365e9a283699a75e5662773 0.22.0.tar.gz sha256 ed991ffbece8f543f5dc6aa5a660ab1ed4bae771b6aa4930663a3902cc160ea3 0.23.0.tar.gz +sha256 5703d724e0b2ce3b98208549ca9d1abdc9a0298a9abfd748b34863c0c4015dcf 0.24.0.tar.gz # sysdig sha256 6e477ac5fe9d3110b870bd4495f01541373a008c375a1934a2d1c46798b6bad6 146a431edf95829ac11bfd9c85ba3ef08789bffe.tar.gz sha256 1c69363e4c36cdaeed413c2ef557af53bfc4bf1109fbcb6d6e18dc40fe6ddec8 be1ea2d9482d0e6e2cb14a0fd7e08cbecf517f94.tar.gz sha256 
766e8952a36a4198fd976b9d848523e6abe4336612188e4fc911e217d8e8a00d 96bd9bc560f67742738eb7255aeb4d03046b8045.tar.gz +sha256 6c3f5f2d699c9540e281f50cbc5cb6b580f0fc689798bc65d4a77f57f932a71c 85c88952b018fdbce2464222c3303229f5bfcfad.tar.gz diff --git a/deploy/iso/minikube-iso/package/falco-module/falco-module.mk b/deploy/iso/minikube-iso/package/falco-module/falco-module.mk index 18ecf90c6a..ececc2493d 100644 --- a/deploy/iso/minikube-iso/package/falco-module/falco-module.mk +++ b/deploy/iso/minikube-iso/package/falco-module/falco-module.mk @@ -4,7 +4,7 @@ # ######################################################################## -FALCO_MODULE_VERSION = 0.23.0 +FALCO_MODULE_VERSION = 0.24.0 FALCO_MODULE_SITE = https://github.com/falcosecurity/falco/archive FALCO_MODULE_SOURCE = $(FALCO_MODULE_VERSION).tar.gz FALCO_MODULE_DEPENDENCIES += ncurses libyaml @@ -12,7 +12,7 @@ FALCO_MODULE_LICENSE = Apache-2.0 FALCO_MODULE_LICENSE_FILES = COPYING # see cmake/modules/sysdig-repo/CMakeLists.txt -FALCO_MODULE_SYSDIG_VERSION = 96bd9bc560f67742738eb7255aeb4d03046b8045 +FALCO_MODULE_SYSDIG_VERSION = 85c88952b018fdbce2464222c3303229f5bfcfad FALCO_MODULE_EXTRA_DOWNLOADS = https://github.com/draios/sysdig/archive/${FALCO_MODULE_SYSDIG_VERSION}.tar.gz define FALCO_MODULE_SYSDIG_SRC diff --git a/deploy/kicbase/Dockerfile b/deploy/kicbase/Dockerfile index 1ecae0c461..776a036013 100644 --- a/deploy/kicbase/Dockerfile +++ b/deploy/kicbase/Dockerfile @@ -56,6 +56,9 @@ RUN echo 'root:root' |chpasswd RUN sed -ri 's/^#?PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config +# minikube relies on /etc/hosts for control-plane discovery. This prevents nefarious DNS servers from breaking it. +RUN sed -ri 's/dns files/files dns/g' /etc/nsswitch.conf + EXPOSE 22 # create docker user for minikube ssh. 
to match VM using "docker" as username RUN adduser --ingroup docker --disabled-password --gecos '' docker diff --git a/deploy/kicbase/entrypoint b/deploy/kicbase/entrypoint index e01f7b47b9..5d7770627c 100755 --- a/deploy/kicbase/entrypoint +++ b/deploy/kicbase/entrypoint @@ -82,7 +82,7 @@ fix_cgroup_mounts() { # NOTE: This extracts fields 4 and on # See https://man7.org/linux/man-pages/man5/proc.5.html for field names - cgroup_mounts=$(egrep -o '(/docker|libpod_parent).*/sys/fs/cgroup.*' /proc/self/mountinfo || true) + cgroup_mounts=$(egrep -o '(/docker|libpod_parent|/kubepods).*/sys/fs/cgroup.*' /proc/self/mountinfo || true) if [[ -n "${cgroup_mounts}" ]]; then local mount_root diff --git a/deploy/minikube/release_sanity_test.go b/deploy/minikube/release_sanity_test.go index 8817606af5..8788e9de73 100644 --- a/deploy/minikube/release_sanity_test.go +++ b/deploy/minikube/release_sanity_test.go @@ -46,7 +46,7 @@ func getSHAFromURL(url string) (string, error) { return hex.EncodeToString(b[:]), nil } -func TestReleasesJson(t *testing.T) { +func TestReleasesJSON(t *testing.T) { releases, err := notify.GetAllVersionsFromURL(notify.GithubMinikubeReleasesURL) if err != nil { t.Fatalf("Error getting releases.json: %v", err) diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index 6272c33481..8235e0cfd9 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -41,7 +41,9 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/storageclass" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/retry" ) @@ -144,7 +146,7 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err // to match both ingress and ingress-dns addons if strings.HasPrefix(name, "ingress") && enable { if driver.IsKIC(cc.Driver) && runtime.GOOS != "linux" { - exit.UsageT(`Due to networking limitations of driver 
{{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported. + exit.Message(reason.Usage, `Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported. Alternatively to use this addon you can use a vm-based driver: 'minikube start --vm=true' @@ -152,7 +154,7 @@ Alternatively to use this addon you can use a vm-based driver: To track the update on this work in progress feature please check: https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Driver, "os_name": runtime.GOOS, "addon_name": name}) } else if driver.BareMetal(cc.Driver) { - exit.UsageT(`Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not supported. Try using a different driver.`, + exit.Message(reason.Usage, `Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not supported. Try using a different driver.`, out.V{"driver_name": cc.Driver, "addon_name": name}) } } @@ -177,7 +179,7 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri cp, err := config.PrimaryControlPlane(cc) if err != nil { - exit.WithError("Error getting primary control plane", err) + exit.Error(reason.GuestCpConfig, "Error getting primary control plane", err) } mName := driver.MachineName(*cc, cp) @@ -193,8 +195,8 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri if err != nil { return errors.Wrap(err, "registry port") } - out.T(out.Tip, `Registry addon on with {{.driver}} uses {{.port}} please use that instead of default 5000`, out.V{"driver": cc.Driver, "port": port}) - out.T(out.Documentation, `For more information see: https://minikube.sigs.k8s.io/docs/drivers/{{.driver}}`, out.V{"driver": cc.Driver}) + out.T(style.Tip, `Registry addon on with {{.driver}} uses {{.port}} please use that instead of default 5000`, out.V{"driver": cc.Driver, "port": port}) + out.T(style.Documentation, `For more information see: 
https://minikube.sigs.k8s.io/docs/drivers/{{.driver}}`, out.V{"driver": cc.Driver}) } } @@ -334,7 +336,7 @@ func verifyAddonStatusInternal(cc *config.ClusterConfig, name string, val string label, ok := addonPodLabels[name] if ok && enable { - out.T(out.HealthCheck, "Verifying {{.addon_name}} addon...", out.V{"addon_name": name}) + out.T(style.HealthCheck, "Verifying {{.addon_name}} addon...", out.V{"addon_name": name}) client, err := kapi.Client(viper.GetString(config.ProfileName)) if err != nil { return errors.Wrapf(err, "get kube-client to validate %s addon: %v", name, err) @@ -351,7 +353,6 @@ func verifyAddonStatusInternal(cc *config.ClusterConfig, name string, val string // Start enables the default addons for a profile, plus any additional func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]bool, additional []string) { - wg.Add(1) defer wg.Done() start := time.Now() @@ -395,7 +396,7 @@ func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]boo defer func() { // making it show after verifications( not perfect till #7613 is closed) register.Reg.SetStep(register.EnablingAddons) - out.T(out.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) + out.T(style.AddonEnable, "Enabled addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) }() for _, a := range toEnableList { awg.Add(1) diff --git a/pkg/addons/addons_test.go b/pkg/addons/addons_test.go index 004d0f5047..a85051ee25 100644 --- a/pkg/addons/addons_test.go +++ b/pkg/addons/addons_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/tests" ) func createTestProfile(t *testing.T) string { @@ -35,6 +36,13 @@ func createTestProfile(t *testing.T) string { t.Fatalf("tempdir: %v", err) } + t.Cleanup(func() { + err := os.RemoveAll(td) + t.Logf("remove path %q", td) + if err != nil { + 
t.Errorf("failed to clean up temp folder %q", td) + } + }) err = os.Setenv(localpath.MinikubeHome, td) if err != nil { t.Errorf("error setting up test environment. could not set %s", localpath.MinikubeHome) @@ -122,6 +130,10 @@ func TestSetAndSave(t *testing.T) { } func TestStart(t *testing.T) { + // this test will write a config.json into MinikubeHome, create a temp dir for it + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + cc := &config.ClusterConfig{ Name: "start", CPUs: 2, @@ -130,7 +142,8 @@ func TestStart(t *testing.T) { } var wg sync.WaitGroup - Start(&wg, cc, map[string]bool{}, []string{"dashboard"}) + wg.Add(1) + go Start(&wg, cc, map[string]bool{}, []string{"dashboard"}) wg.Wait() if !assets.Addons["dashboard"].IsEnabled(cc) { diff --git a/pkg/addons/gcpauth/enable.go b/pkg/addons/gcpauth/enable.go index 20dd5cef2b..2494c513e3 100644 --- a/pkg/addons/gcpauth/enable.go +++ b/pkg/addons/gcpauth/enable.go @@ -29,6 +29,8 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) const ( @@ -46,7 +48,6 @@ func EnableOrDisable(cfg *config.ClusterConfig, name string, val string) error { return enableAddon(cfg) } return disableAddon(cfg) - } func enableAddon(cfg *config.ClusterConfig) error { @@ -58,7 +59,7 @@ func enableAddon(cfg *config.ClusterConfig) error { ctx := context.Background() creds, err := google.FindDefaultCredentials(ctx) if err != nil { - exit.WithCodeT(exit.Failure, "Could not find any GCP credentials. Either run `gcloud auth login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.") + exit.Message(reason.InternalCredsNotFound, "Could not find any GCP credentials. 
Either run `gcloud auth login` or set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of your credentials file.") } f := assets.NewMemoryAssetTarget(creds.JSON, credentialsPath, "0444") @@ -83,7 +84,7 @@ func enableAddon(cfg *config.ClusterConfig) error { } out.WarningT("Could not determine a Google Cloud project, which might be ok.") - out.T(out.Tip, `To set your Google Cloud project, run: + out.T(style.Tip, `To set your Google Cloud project, run: gcloud config set project @@ -122,8 +123,8 @@ func DisplayAddonMessage(cfg *config.ClusterConfig, name string, val string) err return errors.Wrapf(err, "parsing bool: %s", name) } if enable { - out.T(out.Notice, "Your GCP credentials will now be mounted into every pod created in the {{.name}} cluster.", out.V{"name": cfg.Name}) - out.T(out.Notice, "If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.") + out.T(style.Notice, "Your GCP credentials will now be mounted into every pod created in the {{.name}} cluster.", out.V{"name": cfg.Name}) + out.T(style.Notice, "If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.") } return nil } diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 3adcd5b51f..9f66c9ac2f 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -68,6 +68,7 @@ func NewDriver(c Config) *Driver { // Create a host using the driver's config func (d *Driver) Create() error { params := oci.CreateParams{ + Mounts: d.NodeConfig.Mounts, Name: d.NodeConfig.MachineName, Image: d.NodeConfig.ImageDigest, ClusterLabel: oci.ProfileLabelKey + "=" + d.MachineName, diff --git a/pkg/drivers/kic/oci/cli_runner.go b/pkg/drivers/kic/oci/cli_runner.go index cfcde88f39..9a5cabd567 100644 --- a/pkg/drivers/kic/oci/cli_runner.go +++ b/pkg/drivers/kic/oci/cli_runner.go @@ -28,6 +28,7 @@ import ( 
"github.com/golang/glog" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // RunResult holds the results of a Runner @@ -100,7 +101,7 @@ func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) { if warn { // convert exec.Command to with context cmdWithCtx := exec.CommandContext(ctx, cmd.Args[0], cmd.Args[1:]...) - cmdWithCtx.Stdout = cmd.Stdout //copying the original command + cmdWithCtx.Stdout = cmd.Stdout // copying the original command cmdWithCtx.Stderr = cmd.Stderr cmd = cmdWithCtx } @@ -134,7 +135,7 @@ func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) { out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": rr.Command(), "duration": elapsed}) // Don't show any restarting hint, when running podman locally (on linux, with sudo). Only when having a service. if cmd.Args[0] != "sudo" { - out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]}) + out.ErrT(style.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]}) } } diff --git a/pkg/drivers/kic/oci/network.go b/pkg/drivers/kic/oci/network.go index dc905a2777..d41e8729fd 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -58,9 +58,20 @@ func digDNS(ociBin, containerName, dns string) (net.IP, error) { return ip, nil } +// profileInContainers checks whether the profile is within the containers list +func profileInContainers(profile string, containers []string) bool { + for _, container := range containers { + if container == profile { + return true + } + } + return false +} + // dockerGatewayIP gets the default gateway ip for the docker bridge on the user's host machine // gets the ip from user's host docker func dockerGatewayIP(profile string) (net.IP, error) { + var bridgeID string // check if using custom network first if networkExists(profile) { ip := net.ParseIP(DefaultGateway) @@ -70,8 +81,25 @@ 
func dockerGatewayIP(profile string) (net.IP, error) { if err != nil { return nil, errors.Wrapf(err, "get network bridge") } + networksOutput := strings.TrimSpace(rr.Stdout.String()) + networksSlice := strings.Fields(networksOutput) + // Look for the minikube container within each docker network + for _, net := range networksSlice { + // get all containers in the network + rs, err := runCmd(exec.Command(Docker, "network", "inspect", net, "-f", "{{range $k, $v := .Containers}}{{$v.Name}} {{end}}")) + if err != nil { + return nil, errors.Wrapf(err, "get containers in network") + } + containersSlice := strings.Fields(rs.Stdout.String()) + if profileInContainers(profile, containersSlice) { + bridgeID = net + break + } + } - bridgeID := strings.TrimSpace(rr.Stdout.String()) + if bridgeID == "" { + return nil, errors.Errorf("unable to determine bridge network id from %q", networksOutput) + } rr, err = runCmd(exec.Command(Docker, "network", "inspect", "--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID)) if err != nil { diff --git a/pkg/drivers/kic/oci/types.go b/pkg/drivers/kic/oci/types.go index 9ede5b7032..8a740eae2c 100644 --- a/pkg/drivers/kic/oci/types.go +++ b/pkg/drivers/kic/oci/types.go @@ -16,6 +16,13 @@ limitations under the License. package oci +import ( + "errors" + "fmt" + "path/filepath" + "strings" +) + const ( // DefaultBindIPV4 is The default IP the container will listen on. DefaultBindIPV4 = "127.0.0.1" @@ -102,6 +109,46 @@ type Mount struct { Propagation MountPropagation `protobuf:"varint,5,opt,name=propagation,proto3,enum=runtime.v1alpha2.MountPropagation" json:"propagation,omitempty"` } +// ParseMountString parses a mount string of format: +// '[host-path:]container-path[:]' The comma-delimited 'options' are +// [rw|ro], [Z], [srhared|rslave|rprivate]. 
+func ParseMountString(spec string) (m Mount, err error) { + switch fields := strings.Split(spec, ":"); len(fields) { + case 0: + err = errors.New("invalid empty spec") + case 1: + m.ContainerPath = fields[0] + case 3: + for _, opt := range strings.Split(fields[2], ",") { + switch opt { + case "Z": + m.SelinuxRelabel = true + case "ro": + m.Readonly = true + case "rw": + m.Readonly = false + case "rslave": + m.Propagation = MountPropagationHostToContainer + case "rshared": + m.Propagation = MountPropagationBidirectional + case "private": + m.Propagation = MountPropagationNone + default: + err = fmt.Errorf("unknown mount option: '%s'", opt) + } + } + fallthrough + case 2: + m.HostPath, m.ContainerPath = fields[0], fields[1] + if !filepath.IsAbs(m.ContainerPath) { + err = fmt.Errorf("'%s' container path must be absolute", m.ContainerPath) + } + default: + err = errors.New("spec must be in form: :[:]") + } + return m, err +} + // PortMapping specifies a host port mapped into a container port. 
// In yaml this looks like: // containerPort: 80 diff --git a/pkg/drivers/kic/types.go b/pkg/drivers/kic/types.go index a374da2a13..5d138a0ea8 100644 --- a/pkg/drivers/kic/types.go +++ b/pkg/drivers/kic/types.go @@ -24,9 +24,9 @@ import ( const ( // Version is the current version of kic - Version = "v0.0.12-snapshot" + Version = "v0.0.12-snapshot3" // SHA of the kic base image - baseImageSHA = "7be40a42fdfec56fbf7bc9de07ea2ed4a931cbb70dccb8612b2ba13763bf4568" + baseImageSHA = "1d687ba53e19dbe5fafe4cc18aa07f269ecc4b7b622f2251b5bf569ddb474e9b" ) var ( diff --git a/pkg/generate/docs.go b/pkg/generate/docs.go index 14a1aa0bdf..2ae8b95dc8 100644 --- a/pkg/generate/docs.go +++ b/pkg/generate/docs.go @@ -124,7 +124,7 @@ func writeSubcommands(command *cobra.Command, w io.Writer) error { func generateTitle(command *cobra.Command, w io.Writer) error { date := time.Now().Format("2006-01-02") - title := out.ApplyTemplateFormatting(9999, false, title, out.V{"Command": command.Name(), "Description": command.Short, "Date": date}) + title := out.Fmt(title, out.V{"Command": command.Name(), "Description": command.Short, "Date": date}) _, err := w.Write([]byte(title)) return err } @@ -134,5 +134,5 @@ func saveDocForCommand(command *cobra.Command, contents []byte, path string) err if err := os.Remove(fp); err != nil { glog.Warningf("error removing %s", fp) } - return ioutil.WriteFile(fp, contents, 0644) + return ioutil.WriteFile(fp, contents, 0o644) } diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go index d4813642ea..342425f82d 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go @@ -65,12 +65,6 @@ etcd: dataDir: {{.EtcdDataDir}} extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://{{.AdvertiseAddress}}:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: 
{{.KubernetesVersion}} networking: dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}} diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index 23bb0665f1..9e1cc650d5 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -68,12 +68,6 @@ etcd: {{- range $i, $val := printMapInOrder .EtcdExtraArgs ": " }} {{$val}} {{- end}} -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: {{.KubernetesVersion}} networking: dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}} diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 2bd931ce86..7e367f9763 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml index 35686aff25..49e49992ec 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index 6df17df9a0..4d2e6ef796 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd 
extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 4b8c27e8ba..b300459369 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index 1a4fdd56bc..a447921c99 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 
@@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index ce9946026f..9a2be297b6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 369af2a1cf..5c24065898 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" 
+controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: 1.1.1.1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index 1882af8429..8e18b41e91 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 36abbc58b8..ae0e3b33bb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -27,8 
+27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.14.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 51dc8711c7..f847dc27ca 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml index 32bff46119..acc7c1cc3e 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index ab76cbaee5..0fe66c2290 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": 
"false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index 73ea4bf1ae..886ecc55fd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 8c4a3494b1..f797584ae6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 
-controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index 158eb53826..a1ff88dfe0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index 0d8183a059..91132a6da2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" 
certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: 1.1.1.1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index f4ba9146de..753e0f5808 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index b6de73bf54..ac34fb54c9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -27,8 +27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: 
extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.15.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 90ebee7d8d..43c3a51676 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml index 376d2051ee..541fe28d7d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml @@ -23,6 
+23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index 7056670795..c1220ac31c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index 075cd7b3a3..1dcc285066 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index 1458b349e1..bcbe3e1adb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 
networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index b045ccc6ac..d3bddc93ff 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index 9087314332..2167eec380 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: 
/var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: 1.1.1.1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 421afab0e1..a5fabb7591 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index fc83b1cf6b..fb7d61c5bd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -27,8 +27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: 
dataDir: /var/lib/minikube/etcd extraArgs: listen-metrics-urls: http://127.0.0.1:2381,http://1.1.1.1:2381 -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.16.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index 10b16c79ef..88319aefdf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml index 3cd14fc1ac..055a20bd15 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 928385bea4..bfb66f2e43 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index b8309685d7..4451a1c2c1 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 176dc7885e..fa9d43a70b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index 91decc51dc..bd43de8704 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index ad58257e9e..0447ecc4ee 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: 1.1.1.1 diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index ebe9637fb1..47a5f98af5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index b7558c4a57..210d15bf35 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -27,8 +27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.17.0 networking: dnsDomain: cluster.local diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index 7176081d05..499011c336 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml index 4ef6e03203..979a943896 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: 
control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index ab01ac609f..c13bc4d030 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index 5393f1ea8f..78f6655b66 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: 
/var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 8c904fafc8..33a8a87757 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 8f705884d8..edb2737949 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" 
+controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 08eca1ab6c..0ed2f3dcc3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: 1.1.1.1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index dd634650ff..3d73fb78bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: 
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 782a74c8c3..c0ef89ccaa 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -27,8 +27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.18.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index 5c46412603..617e821e6b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:12345 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index 3270ee9112..d91d3e926e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index 
944f6e3f37..6c12857ab2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index f2788b1449..1dcff3d334 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -29,9 +29,11 @@ controllerManager: extraArgs: feature-gates: "a=b" kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: feature-gates: "a=b" + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -43,12 +45,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index 
faca2cc7ce..751041646e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index 86d214edb6..da68c6fbcd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 
networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index b4d7860fea..cadb2556e0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -23,6 +23,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -33,12 +39,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: 1.1.1.1 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 933aaee359..be593e2fd0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -24,6 +24,12 @@ apiServer: certSANs: ["127.0.0.1", "localhost", "1.1.1.1"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +controllerManager: + extraArgs: + leader-elect: "false" +scheduler: + extraArgs: + leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 @@ -34,12 +40,6 @@ etcd: dataDir: 
/var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index 216b59b45d..f9bad9233f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -27,8 +27,10 @@ apiServer: controllerManager: extraArgs: kube-api-burst: "32" + leader-elect: "false" scheduler: extraArgs: + leader-elect: "false" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs clusterName: mk @@ -40,12 +42,6 @@ etcd: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" -controllerManager: - extraArgs: - "leader-elect": "false" -scheduler: - extraArgs: - "leader-elect": "false" kubernetesVersion: v1.19.0 networking: dnsDomain: cluster.local diff --git a/pkg/minikube/bootstrapper/bsutil/versions.go b/pkg/minikube/bootstrapper/bsutil/versions.go index 78e3c76e7a..6222380c8d 100644 --- a/pkg/minikube/bootstrapper/bsutil/versions.go +++ b/pkg/minikube/bootstrapper/bsutil/versions.go @@ -89,7 +89,6 @@ var versionSpecificOpts = []config.VersionedExtraOption{ }, GreaterThanOrEqual: semver.MustParse("1.14.0-alpha.0"), }, - { Option: config.ExtraOption{ Component: Kubelet, @@ -98,4 +97,20 @@ var versionSpecificOpts = []config.VersionedExtraOption{ }, LessThanOrEqual: semver.MustParse("1.11.1000"), }, + { + Option: config.ExtraOption{ + Component: ControllerManager, + Key: "leader-elect", + Value: "false", + }, + GreaterThanOrEqual: semver.MustParse("1.14.0"), + }, + { + Option: config.ExtraOption{ + Component: Scheduler, + Key: "leader-elect", + Value: "false", + }, + GreaterThanOrEqual: semver.MustParse("1.14.0"), + }, } diff --git 
a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index 6cf71f0e07..631e62510e 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -141,7 +141,7 @@ func dashboardFrontend(repo string) string { repo = "kubernetesui" } // See 'kubernetes-dashboard' in deploy/addons/dashboard/dashboard-dp.yaml - return path.Join(repo, "dashboard:v2.0.1") + return path.Join(repo, "dashboard:v2.0.3") } // dashboardMetrics returns the image used for the dashboard metrics scraper diff --git a/pkg/minikube/bootstrapper/images/images_test.go b/pkg/minikube/bootstrapper/images/images_test.go index 2bc369b87c..5b7fe84809 100644 --- a/pkg/minikube/bootstrapper/images/images_test.go +++ b/pkg/minikube/bootstrapper/images/images_test.go @@ -25,7 +25,7 @@ import ( func TestAuxiliary(t *testing.T) { want := []string{ "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", } got := auxiliary("") @@ -37,7 +37,7 @@ func TestAuxiliary(t *testing.T) { func TestAuxiliaryMirror(t *testing.T) { want := []string{ "test.mirror/storage-provisioner:v2", - "test.mirror/dashboard:v2.0.1", + "test.mirror/dashboard:v2.0.3", "test.mirror/metrics-scraper:v1.0.4", } got := auxiliary("test.mirror") diff --git a/pkg/minikube/bootstrapper/images/kubeadm_test.go b/pkg/minikube/bootstrapper/images/kubeadm_test.go index f1348ce901..0880fb0994 100644 --- a/pkg/minikube/bootstrapper/images/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/images/kubeadm_test.go @@ -38,7 +38,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.4.3-0", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", }}, {"v1.16.1", "mirror.k8s.io", []string{ @@ -50,7 +50,7 @@ func TestKubeadmImages(t *testing.T) { 
"mirror.k8s.io/etcd:3.3.15-0", "mirror.k8s.io/pause:3.1", "mirror.k8s.io/storage-provisioner:v2", - "mirror.k8s.io/dashboard:v2.0.1", + "mirror.k8s.io/dashboard:v2.0.3", "mirror.k8s.io/metrics-scraper:v1.0.4", }}, {"v1.15.0", "", []string{ @@ -62,7 +62,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", }}, {"v1.14.0", "", []string{ @@ -74,7 +74,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", }}, {"v1.13.0", "", []string{ @@ -86,7 +86,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", }}, {"v1.12.0", "", []string{ @@ -98,7 +98,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v2", - "kubernetesui/dashboard:v2.0.1", + "kubernetesui/dashboard:v2.0.3", "kubernetesui/metrics-scraper:v1.0.4", }}, } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 9f20e61285..96a21807fd 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -18,20 +18,18 @@ package kubeadm import ( "context" + "fmt" + "net" "os/exec" "path" "runtime" - "sync" - - "fmt" - "net" - - // WARNING: Do not use path/filepath in this package unless you want bizarre Windows paths - "strconv" "strings" + "sync" "time" + // WARNING: Do not use path/filepath in this package unless you want bizarre Windows paths + 
"github.com/blang/semver" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/state" @@ -56,6 +54,7 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" @@ -217,7 +216,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { if driver.IsKIC(cfg.Driver) { // to bypass this error: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist ignore = append(ignore, "FileContent--proc-sys-net-bridge-bridge-nf-call-iptables") - } if err := k.clearStaleConfigs(cfg); err != nil { @@ -275,7 +273,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { // applyCNI applies CNI to a cluster. Needs to be done every time a VM is powered up. func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig) error { - cnm, err := cni.New(cfg) if err != nil { return errors.Wrap(err, "cni config") @@ -285,7 +282,7 @@ func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig) error { return nil } - out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) + out.T(style.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()}) if err := cnm.Apply(k.c); err != nil { return errors.Wrap(err, "cni apply") @@ -302,7 +299,6 @@ func (k *Bootstrapper) applyCNI(cfg config.ClusterConfig) error { // unpause unpauses any Kubernetes backplane components func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error { - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { return err @@ -341,7 +337,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } - out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) + 
out.ErrT(style.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { glog.Warningf("delete failed: %v", err) } @@ -360,7 +356,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { // retry again if it is not a fail fast error if _, ff := err.(*FailFastError); !ff { - out.ErrT(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err}) + out.ErrT(style.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err}) if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { glog.Warningf("delete failed: %v", err) } @@ -397,7 +393,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time start := time.Now() register.Reg.SetStep(register.VerifyingKubernetes) - out.T(out.HealthCheck, "Verifying Kubernetes components...") + out.T(style.HealthCheck, "Verifying Kubernetes components...") // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT cp, err := config.PrimaryControlPlane(&cfg) @@ -572,9 +568,9 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) cmds := []string{ - fmt.Sprintf("%s phase kubelet-start --config %s", baseCmd, conf), fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf), fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf), + fmt.Sprintf("%s phase kubelet-start --config %s", baseCmd, conf), fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, conf), fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf), } @@ -747,14 +743,16 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "kubeadm images") } - r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, - Runner: 
k.c, Socket: cfg.KubernetesConfig.CRISocket}) + r, err := cruntime.New(cruntime.Config{ + Type: cfg.KubernetesConfig.ContainerRuntime, + Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket, + }) if err != nil { return errors.Wrap(err, "runtime") } if err := r.Preload(cfg.KubernetesConfig); err != nil { - glog.Infof("prelaoding failed, will try to load cached images: %v", err) + glog.Infof("preload failed, will try to load cached images: %v", err) } if cfg.KubernetesConfig.ShouldLoadCachedImages { @@ -831,7 +829,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru return errors.Wrap(err, "host alias") } - return sm.Start("kubelet") + return nil } // kubectlPath returns the path to the kubelet @@ -942,16 +940,16 @@ func adviseNodePressure(err error, name string, drv string) { glog.Warning(diskErr) out.WarningT("The node {{.name}} has ran out of disk space.", out.V{"name": name}) // generic advice for all drivers - out.T(out.Tip, "Please free up disk or prune images.") + out.T(style.Tip, "Please free up disk or prune images.") if driver.IsVM(drv) { - out.T(out.Stopped, "Please create a cluster with bigger disk size: `minikube start --disk SIZE_MB` ") + out.T(style.Stopped, "Please create a cluster with bigger disk size: `minikube start --disk SIZE_MB` ") } else if drv == oci.Docker && runtime.GOOS != "linux" { - out.T(out.Stopped, "Please increse Desktop's disk size.") + out.T(style.Stopped, "Please increase Desktop's disk size.") if runtime.GOOS == "darwin" { - out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + out.T(style.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) } if runtime.GOOS == "windows" { - out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + out.T(style.Documentation, "Documentation: {{.url}}", out.V{"url": 
"https://docs.docker.com/docker-for-windows/"}) } } out.ErrLn("") @@ -962,16 +960,16 @@ func adviseNodePressure(err error, name string, drv string) { out.ErrLn("") glog.Warning(memErr) out.WarningT("The node {{.name}} has ran out of memory.", out.V{"name": name}) - out.T(out.Tip, "Check if you have unnecessary pods running by running 'kubectl get po -A") + out.T(style.Tip, "Check if you have unnecessary pods running by running 'kubectl get po -A'") if driver.IsVM(drv) { - out.T(out.Stopped, "Consider creating a cluster with larger memory size using `minikube start --memory SIZE_MB` ") + out.T(style.Stopped, "Consider creating a cluster with larger memory size using `minikube start --memory SIZE_MB` ") } else if drv == oci.Docker && runtime.GOOS != "linux" { - out.T(out.Stopped, "Consider increasing Docker Desktop's memory size.") + out.T(style.Stopped, "Consider increasing Docker Desktop's memory size.") if runtime.GOOS == "darwin" { - out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) + out.T(style.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-mac/space/"}) } if runtime.GOOS == "windows" { - out.T(out.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) + out.T(style.Documentation, "Documentation: {{.url}}", out.V{"url": "https://docs.docker.com/docker-for-windows/"}) } } out.ErrLn("") diff --git a/pkg/minikube/browser/browser.go b/pkg/minikube/browser/browser.go index 890e068844..92da5e1010 100644 --- a/pkg/minikube/browser/browser.go +++ b/pkg/minikube/browser/browser.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/browser" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // OpenURL opens a new browser window pointing to URL. 
@@ -29,7 +30,7 @@ func OpenURL(url string) error { if runtime.GOOS == "linux" { _, err := exec.LookPath("xdg-open") if err != nil { - out.T(out.URL, url) + out.T(style.URL, url) return nil } } diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 4050be5ee8..e482082a8f 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -31,6 +31,7 @@ import ( "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/reason" ) // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI @@ -38,7 +39,7 @@ import ( // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 func init() { if err := flag.Set("logtostderr", "false"); err != nil { - exit.WithError("unable to set logtostderr", err) + exit.Error(reason.InternalFlagSet, "unable to set logtostderr", err) } // Setting the default client to native gives much better performance. diff --git a/pkg/minikube/cni/calico.go b/pkg/minikube/cni/calico.go index 6386effc8f..5a27050ad6 100644 --- a/pkg/minikube/cni/calico.go +++ b/pkg/minikube/cni/calico.go @@ -711,6 +711,8 @@ spec: value: "info" - name: FELIX_HEALTHENABLED value: "true" + - name: IP_AUTODETECTION_METHOD + value: interface=eth.* securityContext: privileged: true resources: diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 42a3e5c26b..e2f5a66438 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -45,6 +45,7 @@ type ClusterConfig struct { HyperkitVpnKitSock string // Only used by the Hyperkit driver HyperkitVSockPorts []string // Only used by the Hyperkit driver DockerEnv []string // Each entry is formatted as KEY=VALUE. 
+ ContainerVolumeMounts []string // Only used by container drivers: Docker, Podman InsecureRegistry []string RegistryMirror []string HostOnlyCIDR string // Only used by the virtualbox driver diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 748a119086..ab48a890a1 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -27,10 +27,10 @@ import ( const ( // DefaultKubernetesVersion is the default Kubernetes version - DefaultKubernetesVersion = "v1.18.3" + DefaultKubernetesVersion = "v1.19.0" // NewestKubernetesVersion is the newest Kubernetes version to test against // NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go - NewestKubernetesVersion = "v1.19.0-rc.4" + NewestKubernetesVersion = "v1.19.0" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.13.0" // DefaultClusterName is the default nane for the k8s cluster diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index a237fe285c..c894768495 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -35,7 +35,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" ) @@ -130,8 +130,8 @@ func (r *Containerd) Name() string { } // Style is the console style for containerd -func (r *Containerd) Style() out.StyleEnum { - return out.Containerd +func (r *Containerd) Style() style.Enum { + return style.Containerd } // Version retrieves the current version of this runtime diff --git a/pkg/minikube/cruntime/containerd_test.go b/pkg/minikube/cruntime/containerd_test.go index 5b1e6414f9..93a58ae001 100644 --- a/pkg/minikube/cruntime/containerd_test.go +++ 
b/pkg/minikube/cruntime/containerd_test.go @@ -25,7 +25,7 @@ func TestAddRepoTagToImageName(t *testing.T) { imgName string want string }{ - {"kubernetesui/dashboard:v2.0.1", "docker.io/kubernetesui/dashboard:v2.0.1"}, + {"kubernetesui/dashboard:v2.0.3", "docker.io/kubernetesui/dashboard:v2.0.3"}, {"kubernetesui/metrics-scraper:v1.0.4", "docker.io/kubernetesui/metrics-scraper:v1.0.4"}, {"gcr.io/k8s-minikube/storage-provisioner:v2", "gcr.io/k8s-minikube/storage-provisioner:v2"}, } diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index 21bf675581..f8c4937434 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -33,7 +33,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" ) @@ -69,8 +69,8 @@ func (r *CRIO) Name() string { } // Style is the console style for CRIO -func (r *CRIO) Style() out.StyleEnum { - return out.CRIO +func (r *CRIO) Style() style.Enum { + return style.CRIO } // Version retrieves the current version of this runtime diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index 8457f898ea..7012e69e71 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -27,7 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" ) @@ -76,7 +76,7 @@ type Manager interface { // Available returns an error if it is not possible to use this runtime on a host Available() error // Style is an associated StyleEnum for Name() - Style() out.StyleEnum + Style() style.Enum // CGroupDriver returns cgroup driver ("cgroupfs" or "systemd") CGroupDriver() (string, error) @@ -175,7 +175,6 @@ 
func ContainerStatusCommand() string { // disableOthers disables all other runtimes except for me. func disableOthers(me Manager, cr CommandRunner) error { - // valid values returned by manager.Name() runtimes := []string{"containerd", "crio", "docker"} for _, name := range runtimes { diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 62487b9d54..750d06c071 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -31,7 +31,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/docker" "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/sysinit" ) @@ -49,6 +49,7 @@ func NewErrISOFeature(missing string) *ErrISOFeature { missing: missing, } } + func (e *ErrISOFeature) Error() string { return e.missing } @@ -66,8 +67,8 @@ func (r *Docker) Name() string { } // Style is the console style for Docker -func (r *Docker) Style() out.StyleEnum { - return out.Docker +func (r *Docker) Style() style.Enum { + return style.Docker } // Version retrieves the current version of this runtime @@ -154,7 +155,6 @@ func (r *Docker) LoadImage(path string) error { return errors.Wrap(err, "loadimage docker.") } return nil - } // CGroupDriver returns cgroup driver ("cgroupfs" or "systemd") diff --git a/pkg/minikube/download/driver.go b/pkg/minikube/download/driver.go index 65780be044..1dfff6f18f 100644 --- a/pkg/minikube/download/driver.go +++ b/pkg/minikube/download/driver.go @@ -23,6 +23,7 @@ import ( "github.com/blang/semver" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) func driverWithChecksumURL(name string, v semver.Version) string { @@ -32,11 +33,11 @@ func driverWithChecksumURL(name string, v semver.Version) string { // Driver downloads an arbitrary driver func Driver(name string, destination string, v semver.Version) error { - out.T(out.FileDownload, 
"Downloading driver {{.driver}}:", out.V{"driver": name}) + out.T(style.FileDownload, "Downloading driver {{.driver}}:", out.V{"driver": name}) if err := download(driverWithChecksumURL(name, v), destination); err != nil { return errors.Wrap(err, "download") } // Give downloaded drivers a baseline decent file permission - return os.Chmod(destination, 0755) + return os.Chmod(destination, 0o755) } diff --git a/pkg/minikube/download/iso.go b/pkg/minikube/download/iso.go index d4ca027a00..43cefcda78 100644 --- a/pkg/minikube/download/iso.go +++ b/pkg/minikube/download/iso.go @@ -30,6 +30,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/lock" "k8s.io/minikube/pkg/version" ) @@ -126,7 +127,7 @@ func downloadISO(isoURL string, skipChecksum bool) error { return nil } - out.T(out.ISODownload, "Downloading VM boot image ...") + out.T(style.ISODownload, "Downloading VM boot image ...") urlWithChecksum := isoURL + "?checksum=file:" + isoURL + ".sha256" if skipChecksum { diff --git a/pkg/minikube/download/json_output.go b/pkg/minikube/download/json_output.go index 7a19838883..483f36c735 100644 --- a/pkg/minikube/download/json_output.go +++ b/pkg/minikube/download/json_output.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "sync" + "time" "github.com/hashicorp/go-getter" "k8s.io/minikube/pkg/minikube/out/register" @@ -44,6 +45,7 @@ func (cpb *jsonOutput) TrackProgress(src string, currentSize, totalSize int64, s artifact: src, current: currentSize, total: totalSize, + Time: time.Now(), }, close: func() error { cpb.lock.Lock() @@ -59,12 +61,17 @@ type jsonReader struct { current int64 total int64 io.Reader + time.Time } func (r *jsonReader) Read(p []byte) (n int, err error) { n, err = r.Reader.Read(p) r.current += int64(n) progress := float64(r.current) / float64(r.total) - register.PrintDownloadProgress(r.artifact, fmt.Sprintf("%v", progress)) + // print 
progress every second so user isn't overwhelmed with events + if t := time.Now(); t.Sub(r.Time) > time.Second || progress == 1 { + register.PrintDownloadProgress(r.artifact, fmt.Sprintf("%v", progress)) + r.Time = t + } return } diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 4429dabcc1..67415ebff2 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -34,6 +34,7 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) const ( @@ -86,7 +87,6 @@ func remoteTarballURL(k8sVersion, containerRuntime string) string { // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string, forcePreload ...bool) bool { - // TODO (#8166): Get rid of the need for this and viper at all force := false if len(forcePreload) > 0 { @@ -138,7 +138,7 @@ func Preload(k8sVersion, containerRuntime string) error { return nil } - out.T(out.FileDownload, "Downloading Kubernetes {{.version}} preload ...", out.V{"version": k8sVersion}) + out.T(style.FileDownload, "Downloading Kubernetes {{.version}} preload ...", out.V{"version": k8sVersion}) url := remoteTarballURL(k8sVersion, containerRuntime) if err := download(url, targetPath); err != nil { @@ -168,7 +168,7 @@ func saveChecksumFile(k8sVersion, containerRuntime string) error { return errors.Wrap(err, "getting storage object") } checksum := attrs.MD5 - return ioutil.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0644) + return ioutil.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0o644) } // verifyChecksum returns true if the checksum of the local binary matches diff --git a/pkg/minikube/driver/install.go b/pkg/minikube/driver/install.go index ae17a917cd..5d019823c3 100644 --- a/pkg/minikube/driver/install.go +++ b/pkg/minikube/driver/install.go @@ -32,6 +32,7 @@ 
import ( "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/lock" ) @@ -90,7 +91,7 @@ func fixDriverPermissions(name string, path string, interactive bool) error { example.WriteString(fmt.Sprintf(" $ %s \n", strings.Join(c.Args, " "))) } - out.T(out.Permissions, "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\n\n{{ .example }}\n", out.V{"driver": name, "example": example.String()}) + out.T(style.Permissions, "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\n\n{{ .example }}\n", out.V{"driver": name, "example": example.String()}) for _, c := range cmds { testArgs := append([]string{"-n"}, c.Args[1:]...) test := exec.Command("sudo", testArgs...) diff --git a/pkg/minikube/exit/exit.go b/pkg/minikube/exit/exit.go index dce7ce1eb5..4073ce5deb 100644 --- a/pkg/minikube/exit/exit.go +++ b/pkg/minikube/exit/exit.go @@ -20,65 +20,56 @@ package exit import ( "os" "runtime" - "runtime/debug" "github.com/golang/glog" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/problem" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) -// Exit codes based on sysexits(3) -const ( - Failure = 1 // Failure represents a general failure code - Interrupted = 2 // Ctrl-C (SIGINT) - BadUsage = 64 // Usage represents an incorrect command line - Data = 65 // Data represents incorrect data supplied by the user - NoInput = 66 // NoInput represents that the input file did not exist or was not readable - Unavailable = 69 // Unavailable represents when a service was unavailable - Software = 70 // Software represents an internal software error. 
- IO = 74 // IO represents an I/O error - Permissions = 77 // Permissions represents a permissions error - Config = 78 // Config represents an unconfigured or misconfigured state - InsufficientStorage = 507 // InsufficientStorage represents insufficient storage in the VM/container -) - -// UsageT outputs a templated usage error and exits with error code 64 -func UsageT(format string, a ...out.V) { - out.ErrWithExitCode(out.Usage, format, BadUsage, a...) - os.Exit(BadUsage) -} - -// WithCodeT outputs a templated fatal error message and exits with the supplied error code. -func WithCodeT(code int, format string, a ...out.V) { - out.ErrWithExitCode(out.FatalType, format, code, a...) - os.Exit(code) -} - -// WithError outputs an error and exits. -func WithError(msg string, err error) { - glog.Infof("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack()) - p := problem.FromError(err, runtime.GOOS) - - if p != nil { - if out.JSON { - p.DisplayJSON(Config) - os.Exit(Config) - } - WithProblem(msg, err, p) - os.Exit(Config) +// Message outputs a templated message and exits without interpretation +func Message(r reason.Kind, format string, args ...out.V) { + if r.ID == "" { + glog.Errorf("supplied reason has no ID: %+v", r) } - out.DisplayError(msg, err) - os.Exit(Software) + + if r.Style == style.None { + r.Style = style.Failure + } + + if r.ExitCode == 0 { + r.ExitCode = reason.ExProgramError + } + + if len(args) == 0 { + args = append(args, out.V{}) + } + + // No need to manipulate the message for JSON output + if out.JSON { + out.Error(r, format, args...) + } else { + args[0]["fatal_msg"] = out.Fmt(format, args...) + args[0]["fatal_code"] = r.ID + out.Error(r, "Exiting due to {{.fatal_code}}: {{.fatal_msg}}", args...) + } + + os.Exit(r.ExitCode) } -// WithProblem outputs info related to a known problem and exits. 
-func WithProblem(msg string, err error, p *problem.Problem) { - out.ErrT(out.Empty, "") - out.FailureT("[{{.id}}] {{.msg}} {{.error}}", out.V{"msg": msg, "id": p.ID, "error": p.Err}) - p.Display() - if p.ShowIssueLink { - out.ErrT(out.Empty, "") - out.ErrT(out.Sad, "If the above advice does not help, please let us know: ") - out.ErrT(out.URL, "https://github.com/kubernetes/minikube/issues/new/choose") - } +// Advice is syntactic sugar to output a message with dynamically generated advice +func Advice(r reason.Kind, msg string, advice string, a ...out.V) { + r.Advice = out.Fmt(advice, a...) + Message(r, msg, a...) +} + +// Error takes a fatal error, matches it against known issues, and outputs the best message for it +func Error(r reason.Kind, msg string, err error) { + ki := reason.MatchKnownIssue(r, err, runtime.GOOS) + if ki != nil { + Message(*ki, err.Error()) + } + // By default, unmatched errors should show a link + r.NewIssueLink = true + Message(r, err.Error()) } diff --git a/pkg/minikube/kubeconfig/context.go b/pkg/minikube/kubeconfig/context.go index f082acdc4a..bb0b6dad92 100644 --- a/pkg/minikube/kubeconfig/context.go +++ b/pkg/minikube/kubeconfig/context.go @@ -56,10 +56,7 @@ func SetCurrentContext(name string, configPath ...string) error { return errors.Wrap(err, "Error getting kubeconfig status") } kcfg.CurrentContext = name - if err := writeToFile(kcfg, fPath); err != nil { - return errors.Wrap(err, "writing kubeconfig") - } - return nil + return writeToFile(kcfg, fPath) } // DeleteContext deletes the specified machine's kubeconfig context diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 00f52d09da..782ae87c97 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -33,12 +33,12 @@ func TestReplaceWinDriveLetterToVolumeName(t *testing.T) { if err != nil { t.Fatalf("Error make tmp directory: %v", err) } - defer func() { //clean up tempdir + defer 
func(path string) { //clean up tempdir err := os.RemoveAll(path) if err != nil { t.Errorf("failed to clean up temp folder %q", path) } - }() + }(path) if runtime.GOOS != "windows" { // Replace to fake func. diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index aedf55457d..19c95972a2 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // rootCauses are regular expressions that match known failures @@ -148,7 +149,7 @@ func OutputProblems(problems map[string][]string, maxLines int) { lines = lines[len(lines)-maxLines:] } for _, l := range lines { - out.T(out.LogEntry, l) + out.T(style.LogEntry, l) } } } @@ -167,9 +168,9 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.Cluster failed := []string{} for i, name := range names { if i > 0 { - out.T(out.Empty, "") + out.T(style.Empty, "") } - out.T(out.Empty, "==> {{.name}} <==", out.V{"name": name}) + out.T(style.Empty, "==> {{.name}} <==", out.V{"name": name}) var b bytes.Buffer c := exec.Command("/bin/bash", "-c", cmds[name]) c.Stdout = &b @@ -181,7 +182,7 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.Cluster } scanner := bufio.NewScanner(&b) for scanner.Scan() { - out.T(out.Empty, scanner.Text()) + out.T(style.Empty, scanner.Text()) } } diff --git a/pkg/minikube/machine/advice.go b/pkg/minikube/machine/advice.go index f3a73578aa..dc08d9a667 100644 --- a/pkg/minikube/machine/advice.go +++ b/pkg/minikube/machine/advice.go @@ -22,43 +22,43 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // MaybeDisplayAdvice will provide advice without exiting, so minikube has a chance to try the failover func MaybeDisplayAdvice(err error, driver string) { if 
errors.Is(err, oci.ErrDaemonInfo) { out.ErrLn("") - out.ErrT(out.Conflict, "{{.driver_name}} couldn't proceed because {{.driver_name}} service is not healthy.", out.V{"driver_name": driver}) + out.ErrT(style.Conflict, "{{.driver_name}} couldn't proceed because {{.driver_name}} service is not healthy.", out.V{"driver_name": driver}) } if errors.Is(err, oci.ErrExitedUnexpectedly) { out.ErrLn("") - out.ErrT(out.Conflict, "The minikube {{.driver_name}} container exited unexpectedly.", out.V{"driver_name": driver}) + out.ErrT(style.Conflict, "The minikube {{.driver_name}} container exited unexpectedly.", out.V{"driver_name": driver}) } if errors.Is(err, oci.ErrExitedUnexpectedly) || errors.Is(err, oci.ErrDaemonInfo) { - out.T(out.Tip, "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:", out.V{"driver_name": driver}) - out.T(out.Empty, ` + out.T(style.Tip, "If you are still interested to make {{.driver_name}} driver work, the following suggestions might help you get past this issue:", out.V{"driver_name": driver}) + out.T(style.Empty, ` - Prune unused {{.driver_name}} images, volumes and abandoned containers.`, out.V{"driver_name": driver}) - out.T(out.Empty, ` + out.T(style.Empty, ` - Restart your {{.driver_name}} service`, out.V{"driver_name": driver}) if runtime.GOOS != "linux" { - out.T(out.Empty, ` + out.T(style.Empty, ` - Ensure your {{.driver_name}} daemon has access to enough CPU/memory resources.
`, out.V{"driver_name": driver}) if runtime.GOOS == "darwin" && driver == oci.Docker { - out.T(out.Empty, ` + out.T(style.Empty, ` - Docs https://docs.docker.com/docker-for-mac/#resources`, out.V{"driver_name": driver}) } if runtime.GOOS == "windows" && driver == oci.Docker { - out.T(out.Empty, ` + out.T(style.Empty, ` - Docs https://docs.docker.com/docker-for-windows/#resources`, out.V{"driver_name": driver}) } } - out.T(out.Empty, ` + out.T(style.Empty, ` - Delete and recreate minikube cluster minikube delete minikube start --driver={{.driver_name}}`, out.V{"driver_name": driver}) // TODO #8348: maybe advice user if to set the --force-systemd https://github.com/kubernetes/minikube/issues/8348 } - } diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index ff69f17ced..0f7a585e55 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -47,6 +47,7 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/registry" ) @@ -220,7 +221,6 @@ func (api *LocalClient) Create(h *host.Host) error { } for _, step := range steps { - if err := step.f(); err != nil { return errors.Wrap(err, step.name) } @@ -281,7 +281,7 @@ func (cg *CertGenerator) ValidateCertificate(addr string, authOptions *auth.Opti func registerDriver(drvName string) { def := registry.Driver(drvName) if def.Empty() { - exit.UsageT("unsupported or missing driver: {{.name}}", out.V{"name": drvName}) + exit.Message(reason.Usage, "unsupported or missing driver: {{.name}}", out.V{"name": drvName}) } plugin.RegisterDriver(def.Init()) } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index e5f39bd454..58ebf8c698 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -70,6 +70,9 @@ var defaultClusterConfig = config.ClusterConfig{ } func 
TestCreateHost(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -115,6 +118,9 @@ func TestCreateHost(t *testing.T) { } func TestStartHostExists(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -154,6 +160,9 @@ func TestStartHostExists(t *testing.T) { } func TestStartHostErrMachineNotExist(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -201,6 +210,9 @@ func TestStartHostErrMachineNotExist(t *testing.T) { } func TestStartStoppedHost(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -238,6 +250,9 @@ func TestStartStoppedHost(t *testing.T) { } func TestStartHost(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -268,6 +283,9 @@ func TestStartHost(t *testing.T) { } func TestStartHostConfig(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + download.EnableMock(true) RegisterMockDriver(t) @@ -310,6 +328,9 @@ func TestStopHostError(t *testing.T) { } func TestStopHost(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) @@ -329,6 +350,9 @@ func TestStopHost(t *testing.T) { } func TestDeleteHost(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) if _, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}); err != nil { @@ -344,6 +368,9 @@ func TestDeleteHost(t *testing.T) { } func TestDeleteHostErrorDeletingVM(t *testing.T) { + 
tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) @@ -360,6 +387,9 @@ func TestDeleteHostErrorDeletingVM(t *testing.T) { } func TestDeleteHostErrorDeletingFiles(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true @@ -373,6 +403,9 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { } func TestDeleteHostErrMachineNotExist(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) @@ -388,6 +421,9 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { } func TestStatus(t *testing.T) { + tempDir := tests.MakeTempDir() + defer tests.RemoveTempDir(tempDir) + RegisterMockDriver(t) api := tests.NewMockAPI(t) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 3c0a2d6f12..f4bfa83f47 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -31,6 +31,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file @@ -95,7 +96,7 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool) time.Sleep(1 * time.Second) } - out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) + out.T(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) return delete(api, host, machineName) } diff --git 
a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index f36f254d85..7eca6bd1c8 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) // hostRunner is a minimal host.Host based interface for running commands @@ -112,7 +113,7 @@ func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.No } if !me || err == constants.ErrMachineMissing { - out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) + out.T(style.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) demolish(api, *cc, *n, h) glog.Infof("Sleeping 1 second for extra luck!") @@ -134,13 +135,13 @@ func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.No if s == state.Running { if !recreated { - out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) + out.T(style.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) } return h, nil } if !recreated { - out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) + out.T(style.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) } if err := h.Driver.Start(); err != nil { 
MaybeDisplayAdvice(err, h.DriverName) @@ -160,7 +161,7 @@ func maybeWarnAboutEvalEnv(drver string, name string) { return } if os.Getenv(constants.MinikubeActiveDockerdEnv) != "" { - out.T(out.Notice, "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver}) + out.T(style.Notice, "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver}) // TODO: refactor docker-env package to generate only eval command per shell. https://github.com/kubernetes/minikube/issues/6887 out.WarningT(`Please re-eval your docker-env, To ensure your environment variables have updated ports: @@ -169,7 +170,7 @@ func maybeWarnAboutEvalEnv(drver string, name string) { `, out.V{"profile_name": name}) } if os.Getenv(constants.MinikubeActivePodmanEnv) != "" { - out.T(out.Notice, "Noticed you have an activated podman-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver}) + out.T(style.Notice, "Noticed you have an activated podman-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver}) // TODO: refactor podman-env package to generate only eval command per shell. 
https://github.com/kubernetes/minikube/issues/6887 out.WarningT(`Please re-eval your podman-env, To ensure your environment variables have updated ports: @@ -177,7 +178,6 @@ func maybeWarnAboutEvalEnv(drver string, name string) { `, out.V{"profile_name": name}) } - } // ensureGuestClockSync ensures that the guest system clock is relatively in-sync diff --git a/pkg/minikube/machine/info.go b/pkg/minikube/machine/info.go index 7799d513da..99bf55982d 100644 --- a/pkg/minikube/machine/info.go +++ b/pkg/minikube/machine/info.go @@ -28,6 +28,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/style" ) // HostInfo holds information on the user's machine @@ -80,7 +81,7 @@ func showLocalOsRelease() { } register.Reg.SetStep(register.LocalOSRelease) - out.T(out.Provisioner, "OS release is {{.pretty_name}}", out.V{"pretty_name": osReleaseInfo.PrettyName}) + out.T(style.Provisioner, "OS release is {{.pretty_name}}", out.V{"pretty_name": osReleaseInfo.PrettyName}) } // logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM @@ -99,8 +100,10 @@ func logRemoteOsRelease(r command.Runner) { glog.Infof("Remote host: %s", osReleaseInfo.PrettyName) } -var cachedSystemMemoryLimit *mem.VirtualMemoryStat -var cachedSystemMemoryErr *error +var ( + cachedSystemMemoryLimit *mem.VirtualMemoryStat + cachedSystemMemoryErr *error +) // cachedSysMemLimit will return a cached limit for the system's virtual memory. 
func cachedSysMemLimit() (*mem.VirtualMemoryStat, error) { @@ -115,8 +118,10 @@ func cachedSysMemLimit() (*mem.VirtualMemoryStat, error) { return cachedSystemMemoryLimit, *cachedSystemMemoryErr } -var cachedDisk *disk.UsageStat -var cachedDiskInfoErr *error +var ( + cachedDisk *disk.UsageStat + cachedDiskInfoErr *error +) // cachedDiskInfo will return a cached disk usage info func cachedDiskInfo() (disk.UsageStat, error) { @@ -131,8 +136,10 @@ func cachedDiskInfo() (disk.UsageStat, error) { return *cachedDisk, *cachedDiskInfoErr } -var cachedCPU *[]cpu.InfoStat -var cachedCPUErr *error +var ( + cachedCPU *[]cpu.InfoStat + cachedCPUErr *error +) // cachedCPUInfo will return a cached cpu info func cachedCPUInfo() ([]cpu.InfoStat, error) { diff --git a/pkg/minikube/machine/ssh.go b/pkg/minikube/machine/ssh.go index 0f991e2f2c..c547ed09d7 100644 --- a/pkg/minikube/machine/ssh.go +++ b/pkg/minikube/machine/ssh.go @@ -42,14 +42,14 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, return errors.Errorf("%q is not running", machineName) } - client, err := host.CreateSSHClient() - if native { ssh.SetDefaultClient(ssh.Native) } else { ssh.SetDefaultClient(ssh.External) } + client, err := host.CreateSSHClient() + if err != nil { return errors.Wrap(err, "Creating ssh client") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 5328f92ece..ae0bebc0b8 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -45,26 +45,26 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/registry" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util/lock" ) -var ( - // requiredDirectories are directories to create on the host during setup - requiredDirectories = []string{ - vmpath.GuestAddonsDir, - 
vmpath.GuestManifestsDir, - vmpath.GuestEphemeralDir, - vmpath.GuestPersistentDir, - vmpath.GuestKubernetesCertsDir, - path.Join(vmpath.GuestPersistentDir, "images"), - path.Join(vmpath.GuestPersistentDir, "binaries"), - vmpath.GuestGvisorDir, - vmpath.GuestCertAuthDir, - vmpath.GuestCertStoreDir, - } -) +// requiredDirectories are directories to create on the host during setup +var requiredDirectories = []string{ + vmpath.GuestAddonsDir, + vmpath.GuestManifestsDir, + vmpath.GuestEphemeralDir, + vmpath.GuestPersistentDir, + vmpath.GuestKubernetesCertsDir, + path.Join(vmpath.GuestPersistentDir, "images"), + path.Join(vmpath.GuestPersistentDir, "binaries"), + vmpath.GuestGvisorDir, + vmpath.GuestCertAuthDir, + vmpath.GuestCertStoreDir, +} // StartHost starts a host VM. func StartHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (*host.Host, bool, error) { @@ -217,17 +217,18 @@ func postStartValidations(h *host.Host, drvName string) { if err != nil { glog.Warningf("error getting command runner: %v", err) } - // make sure /var isn't full, otherwise warn + + // make sure /var isn't full, as pod deployments will fail if it is percentageFull, err := DiskUsed(r, "/var") if err != nil { glog.Warningf("error getting percentage of /var that is free: %v", err) } if percentageFull >= 99 { - exit.WithCodeT(exit.InsufficientStorage, "docker daemon out of memory. No space left on device") + exit.Message(reason.RsrcInsufficientDockerStorage, `Docker is out of disk space! (/var is at {{.p}}% of capacity)`, out.V{"p": percentageFull}) } - if percentageFull > 80 { - out.ErrT(out.Tip, "The docker daemon is almost out of memory, run 'docker system prune' to free up space") + if percentageFull >= 85 { + out.WarnReason(reason.RsrcInsufficientDockerStorage, `Docker is nearly out of disk space, which may cause deployments to fail! 
({{.p}}% of capacity)`, out.V{"p": percentageFull}) } } @@ -292,7 +293,6 @@ func acquireMachinesLock(name string, drv string) (mutex.Releaser, error) { spec.Timeout = 13 * time.Minute if driver.IsKIC(drv) { spec.Timeout = 10 * time.Minute - } glog.Infof("acquiring machines lock for %s: %+v", name, spec) @@ -311,17 +311,17 @@ func showHostInfo(cfg config.ClusterConfig) { info, cpuErr, memErr, DiskErr := CachedHostInfo() if cpuErr == nil && memErr == nil && DiskErr == nil { register.Reg.SetStep(register.RunningLocalhost) - out.T(out.StartingNone, "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"number_of_cpus": info.CPUs, "memory_size": info.Memory, "disk_size": info.DiskSize}) + out.T(style.StartingNone, "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"number_of_cpus": info.CPUs, "memory_size": info.Memory, "disk_size": info.DiskSize}) } return } if driver.IsKIC(cfg.Driver) { // TODO:medyagh add free disk space on docker machine register.Reg.SetStep(register.CreatingContainer) - out.T(out.StartingVM, "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...", out.V{"driver_name": cfg.Driver, "number_of_cpus": cfg.CPUs, "memory_size": cfg.Memory, "machine_type": machineType}) + out.T(style.StartingVM, "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...", out.V{"driver_name": cfg.Driver, "number_of_cpus": cfg.CPUs, "memory_size": cfg.Memory, "machine_type": machineType}) return } register.Reg.SetStep(register.CreatingVM) - out.T(out.StartingVM, "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"driver_name": cfg.Driver, "number_of_cpus": cfg.CPUs, "memory_size": cfg.Memory, "disk_size": cfg.DiskSize, "machine_type": machineType}) + out.T(style.StartingVM, "Creating 
{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"driver_name": cfg.Driver, "number_of_cpus": cfg.CPUs, "memory_size": cfg.Memory, "disk_size": cfg.DiskSize, "machine_type": machineType}) } // AddHostAlias makes fine adjustments to pod resources that aren't possible via kubeadm config. diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index ffa033c3ff..bbcd2c12be 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/retry" ) @@ -41,7 +42,7 @@ func StopHost(api libmachine.API, machineName string) error { return errors.Wrapf(err, "load") } - out.T(out.Stopping, `Stopping node "{{.name}}" ...`, out.V{"name": machineName}) + out.T(style.Stopping, `Stopping node "{{.name}}" ...`, out.V{"name": machineName}) return stop(h) } @@ -79,7 +80,7 @@ func trySSHPowerOff(h *host.Host) error { return nil } - out.T(out.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name}) + out.T(style.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name}) // differnet for kic because RunSSHCommand is not implemented by kic if driver.IsKIC(h.DriverName) { err := oci.ShutDown(h.DriverName, h.Name) diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index da2733b184..3ee4a35b73 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -34,6 +34,8 @@ import ( "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) // ClusterController holds all the needed information for a minikube cluster @@ -64,15 +66,16 @@ func 
Partial(name string, miniHome ...string) (libmachine.API, *config.ClusterCo glog.Infof("Loading cluster: %s", name) api, err := machine.NewAPIClient(miniHome...) if err != nil { - exit.WithError("libmachine failed", err) + exit.Error(reason.NewAPIClient, "libmachine failed", err) } + cc, err := config.Load(name, miniHome...) if err != nil { if config.IsNotExist(err) { - out.T(out.Shrug, `There is no local cluster named "{{.cluster}}"`, out.V{"cluster": name}) - exitTip("start", name, exit.Data) + out.T(style.Shrug, `There is no local cluster named "{{.cluster}}"`, out.V{"cluster": name}) + exitTip("start", name, reason.ExGuestNotFound) } - exit.WithError("Error getting cluster config", err) + exit.Error(reason.HostConfigLoad, "Error getting cluster config", err) } return api, cc @@ -84,43 +87,43 @@ func Running(name string) ClusterController { cp, err := config.PrimaryControlPlane(cc) if err != nil { - exit.WithError("Unable to find control plane", err) + exit.Error(reason.GuestCpConfig, "Unable to find control plane", err) } machineName := driver.MachineName(*cc, cp) hs, err := machine.Status(api, machineName) if err != nil { - exit.WithError("Unable to get machine status", err) + exit.Error(reason.GuestStatus, "Unable to get machine status", err) } if hs == state.None.String() { - out.T(out.Shrug, `The control plane node "{{.name}}" does not exist.`, out.V{"name": cp.Name}) - exitTip("start", name, exit.Unavailable) + out.T(style.Shrug, `The control plane node "{{.name}}" does not exist.`, out.V{"name": cp.Name}) + exitTip("start", name, reason.ExGuestNotFound) } if hs == state.Stopped.String() { - out.T(out.Shrug, `The control plane node must be running for this command`) - exitTip("start", name, exit.Unavailable) + out.T(style.Shrug, `The control plane node must be running for this command`) + exitTip("start", name, reason.ExGuestUnavailable) } if hs != state.Running.String() { - out.T(out.Shrug, `The control plane node is not running (state={{.state}})`, 
out.V{"name": cp.Name, "state": hs}) - exitTip("start", name, exit.Unavailable) + out.T(style.Shrug, `The control plane node is not running (state={{.state}})`, out.V{"name": cp.Name, "state": hs}) + exitTip("start", name, reason.ExSvcUnavailable) } host, err := machine.LoadHost(api, name) if err != nil { - exit.WithError("Unable to load host", err) + exit.Error(reason.GuestLoadHost, "Unable to load host", err) } cr, err := machine.CommandRunner(host) if err != nil { - exit.WithError("Unable to get command runner", err) + exit.Error(reason.InternalCommandRunner, "Unable to get command runner", err) } hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) if err != nil { - exit.WithError("Unable to get forwarded endpoint", err) + exit.Error(reason.DrvCPEndpoint, "Unable to get forwarded endpoint", err) } return ClusterController{ @@ -144,18 +147,18 @@ func Healthy(name string) ClusterController { as, err := kverify.APIServerStatus(co.CP.Runner, co.CP.Hostname, co.CP.Port) if err != nil { out.FailureT(`Unable to get control plane status: {{.error}}`, out.V{"error": err}) - exitTip("delete", name, exit.Unavailable) + exitTip("delete", name, reason.ExSvcError) } if as == state.Paused { - out.T(out.Shrug, `The control plane for "{{.name}}" is paused!`, out.V{"name": name}) - exitTip("unpause", name, exit.Unavailable) + out.T(style.Shrug, `The control plane for "{{.name}}" is paused!`, out.V{"name": name}) + exitTip("unpause", name, reason.ExSvcConfig) } if as != state.Running { - out.T(out.Shrug, `This control plane is not running! (state={{.state}})`, out.V{"state": as.String()}) + out.T(style.Shrug, `This control plane is not running! 
(state={{.state}})`, out.V{"state": as.String()}) out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exitTip("start", name, exit.Unavailable) + exitTip("start", name, reason.ExSvcUnavailable) } return co } @@ -171,6 +174,6 @@ func ExampleCmd(cname string, action string) string { // exitTip returns an action tip and exits func exitTip(action string, profile string, code int) { command := ExampleCmd(profile, action) - out.T(out.Workaround, `To fix this, run: "{{.command}}"`, out.V{"command": command}) + out.T(style.Workaround, `To fix this, run: "{{.command}}"`, out.V{"command": command}) os.Exit(code) } diff --git a/pkg/minikube/node/advice.go b/pkg/minikube/node/advice.go index 0ba97e0abb..79bae78c4f 100644 --- a/pkg/minikube/node/advice.go +++ b/pkg/minikube/node/advice.go @@ -20,56 +20,46 @@ import ( "runtime" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) -// MaybeExitWithAdvice before exiting will try to check for different error types and provide advice if we know for sure what the error is -func MaybeExitWithAdvice(err error) { +// ExitIfFatal before exiting will try to check for different error types and provide advice if we know for sure what the error is +func ExitIfFatal(err error) { if err == nil { return } if errors.Is(err, oci.ErrWindowsContainers) { - out.ErrLn("") - out.ErrT(out.Conflict, "Your Docker Desktop container OS type is Windows but Linux is required.") - out.T(out.Warning, "Please change Docker settings to use Linux containers instead of Windows containers.") - out.T(out.Documentation, "https://minikube.sigs.k8s.io/docs/drivers/docker/#verify-docker-container-type-is-linux") - 
exit.UsageT(`You can verify your Docker container type by running: -{{.command}} - `, out.V{"command": "docker info --format '{{.OSType}}'"}) + exit.Message(reason.Kind{ + ID: "PROVIDER_DOCKER_CONTAINER_OS", + ExitCode: reason.ExProviderConflict, + Style: style.Conflict, + URL: "https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers", + Advice: "From the Docker Desktop menu, select 'Switch to Linux containers'", + }, "Docker Desktop is configured for Windows containers, but Linux containers are required for minikube") } if errors.Is(err, oci.ErrCPUCountLimit) { - out.ErrLn("") - out.ErrT(out.Conflict, "{{.name}} doesn't have enough CPUs. ", out.V{"name": driver.FullName(viper.GetString("driver"))}) - if runtime.GOOS != "linux" && viper.GetString("driver") == "docker" { - out.T(out.Warning, "Please consider changing your Docker Desktop's resources.") - out.T(out.Documentation, "https://docs.docker.com/config/containers/resource_constraints/") - } else { - cpuCount := viper.GetInt(cpus) - if cpuCount == 2 { - out.T(out.Tip, "Please ensure your system has {{.cpu_counts}} CPU cores.", out.V{"cpu_counts": viper.GetInt(cpus)}) - } else { - out.T(out.Tip, "Please ensure your {{.driver_name}} system has access to {{.cpu_counts}} CPU cores or reduce the number of the specified CPUs", out.V{"driver_name": driver.FullName(viper.GetString("driver")), "cpu_counts": viper.GetInt(cpus)}) - } + if runtime.GOOS == "darwin" { + exit.Message(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available") } - exit.UsageT("Ensure your {{.driver_name}} system has enough CPUs. 
The minimum allowed is 2 CPUs.", out.V{"driver_name": viper.GetString("driver")}) + if runtime.GOOS == "windows" { + exit.Message(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available") + } + exit.Message(reason.RsrcInsufficientCores, "Docker has less than 2 CPUs available, but Kubernetes requires at least 2 to be available") } if errors.Is(err, kubeadm.ErrNoExecLinux) { - out.ErrLn("") - out.ErrT(out.Conflict, "kubeadm binary is not executable !") - out.T(out.Documentation, "Try the solution in this link: https://github.com/kubernetes/minikube/issues/8327#issuecomment-651288459") - exit.UsageT(`Ensure the binaries are not mounted with "noexec" option. To check run: - - $ findmnt - -`) + exit.Message(reason.Kind{ + ID: "PROVIDER_DOCKER_NOEXEC", + ExitCode: reason.ExProviderPermission, + Style: style.Permissions, + Issues: []int{8327}, + Advice: "Ensure that your Docker mountpoints do not have the 'noexec' flag set", + }, "The kubeadm binary within the Docker container is not executable") } - } diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 7097945755..bcb3b7807a 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -37,6 +37,8 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" ) const ( @@ -73,17 +75,17 @@ func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) return } if err := doCacheBinaries(k8sVersion); err != nil { - exit.WithError("Failed to cache binaries", err) + exit.Error(reason.InetCacheBinaries, "Failed to cache binaries", err) } if _, err := CacheKubectlBinary(k8sVersion); err != nil { - exit.WithError("Failed to cache kubectl", err) + exit.Error(reason.InetCacheKubectl, "Failed to cache kubectl", err) } 
waitCacheRequiredImages(cacheGroup) waitDownloadKicBaseImage(kicGroup) if err := saveImagesToTarFromConfig(); err != nil { - exit.WithError("Failed to cache images to tar", err) + exit.Error(reason.InetCacheTar, "Failed to cache images to tar", err) } - out.T(out.Check, "Download complete!") + out.T(style.Check, "Download complete!") os.Exit(0) } @@ -115,7 +117,7 @@ func beginDownloadKicBaseImage(g *errgroup.Group, cc *config.ClusterConfig, down } glog.Infof("Beginning downloading kic base image for %s with %s", cc.Driver, cc.KubernetesConfig.ContainerRuntime) - out.T(out.Pulling, "Pulling base image ...") + out.T(style.Pulling, "Pulling base image ...") g.Go(func() error { baseImg := cc.KicBaseImage if baseImg == kic.BaseImage && len(cc.KubernetesConfig.ImageRepository) != 0 { @@ -163,20 +165,19 @@ func waitDownloadKicBaseImage(g *errgroup.Group) { if err != nil { if errors.Is(err, image.ErrGithubNeedsLogin) { glog.Warningf("Error downloading kic artifacts: %v", err) - out.ErrT(out.Connectivity, "Unfortunately, could not download the base image {{.image_name}} ", out.V{"image_name": strings.Split(kic.BaseImage, "@")[0]}) + out.ErrT(style.Connectivity, "Unfortunately, could not download the base image {{.image_name}} ", out.V{"image_name": strings.Split(kic.BaseImage, "@")[0]}) out.WarningT("In order to use the fall back image, you need to log in to the github packages registry") - out.T(out.Documentation, `Please visit the following link for documentation around this: + out.T(style.Documentation, `Please visit the following link for documentation around this: https://help.github.com/en/packages/using-github-packages-with-your-projects-ecosystem/configuring-docker-for-use-with-github-packages#authenticating-to-github-packages `) } if errors.Is(err, image.ErrGithubNeedsLogin) || errors.Is(err, image.ErrNeedsLogin) { - exit.UsageT(`Please either authenticate to the registry or use --base-image flag to use a different registry.`) + exit.Message(reason.Usage, `Please 
either authenticate to the registry or use --base-image flag to use a different registry.`) } else { glog.Errorln("Error downloading kic artifacts: ", err) } } - } glog.Info("Successfully downloaded all kic artifacts") } diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index 92ef4c65ca..38535694b8 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -33,6 +33,8 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/lock" ) @@ -57,7 +59,7 @@ func configureMounts(wg *sync.WaitGroup) { return } - out.T(out.Mounting, "Creating mount {{.name}} ...", out.V{"name": viper.GetString(mountString)}) + out.T(style.Mounting, "Creating mount {{.name}} ...", out.V{"name": viper.GetString(mountString)}) path := os.Args[0] mountDebugVal := 0 if glog.V(8) { @@ -70,9 +72,9 @@ func configureMounts(wg *sync.WaitGroup) { mountCmd.Stderr = os.Stderr } if err := mountCmd.Start(); err != nil { - exit.WithError("Error starting mount", err) + exit.Error(reason.GuestMount, "Error starting mount", err) } - if err := lock.WriteFile(filepath.Join(localpath.MiniPath(), constants.MountProcessFileName), []byte(strconv.Itoa(mountCmd.Process.Pid)), 0644); err != nil { - exit.WithError("Error writing mount pid", err) + if err := lock.WriteFile(filepath.Join(localpath.MiniPath(), constants.MountProcessFileName), []byte(strconv.Itoa(mountCmd.Process.Pid)), 0o644); err != nil { + exit.Error(reason.HostMountPid, "Error writing mount pid", err) } } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 0febcd1a7b..acf7abf952 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -34,7 +34,6 @@ import ( const ( mountString = "mount-string" createMount = "mount" - cpus = "cpus" ) // Add adds a new node config to an existing cluster. 
@@ -116,7 +115,6 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) { // Retrieve finds the node by name in the given cluster func Retrieve(cc config.ClusterConfig, name string) (*config.Node, int, error) { - for i, n := range cc.Nodes { if n.Name == name { return &n, i, nil diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index ba4b7f7e2a..6f9d24871c 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -55,6 +55,8 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/minikube/reason" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/retry" ) @@ -112,14 +114,14 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner) err = bs.StartCluster(*starter.Cfg) if err != nil { - MaybeExitWithAdvice(err) + ExitIfFatal(err) out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner)) return nil, err } // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper if err := kubeconfig.Update(kcs); err != nil { - return nil, errors.Wrap(err, "Failed to update kubeconfig file.") + return nil, errors.Wrap(err, "Failed kubeconfig update") } } else { bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) @@ -134,7 +136,9 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } var wg sync.WaitGroup - go configureMounts(&wg) + if !driver.IsKIC(starter.Cfg.Driver) { + go configureMounts(&wg) + } wg.Add(1) go func() { @@ -146,6 +150,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { // enable addons, both old and new! 
if starter.ExistingAddons != nil { + wg.Add(1) go addons.Start(&wg, starter.Cfg, starter.ExistingAddons, config.AddonList) } @@ -161,7 +166,6 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 { prepareNone() } - } else { if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil { return nil, errors.Wrap(err, "update node") @@ -209,9 +213,9 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFa register.Reg.SetStep(register.StartingNode) name := driver.MachineName(*cc, *n) if apiServer { - out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + out.T(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) } else { - out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + out.T(style.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) } if driver.IsKIC(cc.Driver) { @@ -232,7 +236,6 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFa waitDownloadKicBaseImage(&kicGroup) return startMachine(cc, n, delOnFail) - } // ConfigureRuntimes does what needs to happen to get a runtime going. @@ -245,7 +248,7 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k } cr, err := cruntime.New(co) if err != nil { - exit.WithError("Failed runtime", err) + exit.Error(reason.InternalRuntime, "Failed runtime", err) } disableOthers := true @@ -259,20 +262,20 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k if err := cr.Preload(cc.KubernetesConfig); err != nil { switch err.(type) { case *cruntime.ErrISOFeature: - out.ErrT(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + out.ErrT(style.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err}) default: glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) } if err := machine.CacheImagesForBootstrapper(cc.KubernetesConfig.ImageRepository, cc.KubernetesConfig.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) + exit.Error(reason.RuntimeCache, "Failed to cache images", err) } } } err = cr.Enable(disableOthers, forceSystemd()) if err != nil { - exit.WithError("Failed to enable container runtime", err) + exit.Error(reason.RuntimeEnable, "Failed to enable container runtime", err) } return cr @@ -286,7 +289,7 @@ func forceSystemd() bool { func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, r command.Runner) bootstrapper.Bootstrapper { bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, r) if err != nil { - exit.WithError("Failed to get bootstrapper", err) + exit.Error(reason.InternalBootstrapper, "Failed to get bootstrapper", err) } for _, eo := range config.ExtraOptions { out.Infof("{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) @@ -295,11 +298,11 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, // update cluster and set up certs if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) + exit.Error(reason.KubernetesInstallFailed, "Failed to update cluster", err) } if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { - exit.WithError("Failed to setup certs", err) + exit.Error(reason.GuestCert, "Failed to setup certs", err) } return bs @@ -308,7 +311,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, func 
setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) *kubeconfig.Settings { addr, err := apiServerURL(*h, *cc, *n) if err != nil { - exit.WithError("Failed to get API Server URL", err) + exit.Error(reason.DrvCPEndpoint, "Failed to get API Server URL", err) } if cc.KubernetesConfig.APIServerName != constants.APIServerName { @@ -385,7 +388,7 @@ func startHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node, del return host, exists, err } - out.ErrT(out.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) + out.ErrT(style.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) glog.Info("Will try again in 5 seconds ...") // Try again, but just once to avoid making the logs overly confusing time.Sleep(5 * time.Second) @@ -406,7 +409,7 @@ func startHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node, del // Don't use host.Driver to avoid nil pointer deref drv := cc.Driver - out.ErrT(out.Sad, `Failed to start {{.driver}} {{.driver_type}}. "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "start"), "error": err}) + out.ErrT(style.Sad, `Failed to start {{.driver}} {{.driver_type}}. 
"{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "start"), "error": err}) return host, exists, err } @@ -422,7 +425,7 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st for _, k := range proxy.EnvVars { if v := os.Getenv(k); v != "" { if !optSeen { - out.T(out.Internet, "Found network options:") + out.T(style.Internet, "Found network options:") optSeen = true } out.Infof("{{.key}}={{.value}}", out.V{"key": k, "value": v}) @@ -430,7 +433,7 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st k = strings.ToUpper(k) // for http_proxy & https_proxy if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}).", out.V{"ip_address": ip}) - out.T(out.Documentation, "Please see {{.documentation_url}} for more details", out.V{"documentation_url": "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"}) + out.T(style.Documentation, "Please see {{.documentation_url}} for more details", out.V{"documentation_url": "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/"}) warnedOnce = true } } @@ -467,7 +470,7 @@ func trySSH(h *host.Host, ip string) error { err := retry.Expo(dial, time.Second, 13*time.Second) if err != nil { - out.ErrT(out.FailureType, `minikube is unable to connect to the VM: {{.error}} + out.ErrT(style.Failure, `minikube is unable to connect to the VM: {{.error}} This is likely due to one of two reasons: @@ -505,20 +508,20 @@ func tryRegistry(r command.Runner, driverName string, imageRepository string) { if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { glog.Warningf("%s failed: %v", rr.Args, err) out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": imageRepository, "type": 
driver.MachineType(driverName)}) - out.ErrT(out.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/") + out.ErrT(style.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/") } } // prepareNone prepares the user and host for the joy of the "none" driver func prepareNone() { register.Reg.SetStep(register.ConfiguringLHEnv) - out.T(out.StartingNone, "Configuring local host environment ...") + out.T(style.StartingNone, "Configuring local host environment ...") if viper.GetBool(config.WantNoneDriverWarning) { - out.ErrT(out.Empty, "") + out.ErrT(style.Empty, "") out.WarningT("The 'none' driver is designed for experts who need to integrate with an existing VM") - out.ErrT(out.Tip, "Most users should use the newer 'docker' driver instead, which does not require root!") - out.ErrT(out.Documentation, "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.ErrT(out.Empty, "") + out.ErrT(style.Tip, "Most users should use the newer 'docker' driver instead, which does not require root!") + out.ErrT(style.Documentation, "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.ErrT(style.Empty, "") } if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { @@ -526,16 +529,16 @@ func prepareNone() { out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - out.ErrT(out.Empty, "") - out.ErrT(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.ErrT(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.ErrT(out.Empty, "") + out.ErrT(style.Empty, "") + out.ErrT(style.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.ErrT(style.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.ErrT(style.Empty, "") - out.ErrT(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + out.ErrT(style.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") } if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { - exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) + exit.Message(reason.HostHomeChown, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) } } diff --git a/pkg/minikube/notify/notify.go b/pkg/minikube/notify/notify.go index e7fd0ae2ac..8bf34b96c2 100644 --- a/pkg/minikube/notify/notify.go +++ b/pkg/minikube/notify/notify.go @@ -32,6 +32,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/lock" "k8s.io/minikube/pkg/version" ) @@ -66,8 +67,8 @@ func MaybePrintUpdateText(url string, lastUpdatePath string) bool { glog.Errorf("write time failed: %v", err) } url := "https://github.com/kubernetes/minikube/releases/tag/v" + latestVersion.String() - out.ErrT(out.Celebrate, `minikube {{.version}} is available! 
Download it: {{.url}}`, out.V{"version": latestVersion, "url": url}) - out.ErrT(out.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n") + out.T(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": latestVersion, "url": url}) + out.T(style.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n") return true } return false @@ -133,7 +134,7 @@ func GetAllVersionsFromURL(url string) (Releases, error) { } func writeTimeToFile(path string, inputTime time.Time) error { - err := lock.WriteFile(path, []byte(inputTime.Format(timeLayout)), 0644) + err := lock.WriteFile(path, []byte(inputTime.Format(timeLayout)), 0o644) if err != nil { return errors.Wrap(err, "Error writing current update time to file: ") } diff --git a/pkg/minikube/notify/notify_test.go b/pkg/minikube/notify/notify_test.go index 46e44d5589..85a135ca2a 100644 --- a/pkg/minikube/notify/notify_test.go +++ b/pkg/minikube/notify/notify_test.go @@ -154,7 +154,7 @@ func TestMaybePrintUpdateText(t *testing.T) { tempDir := tests.MakeTempDir() defer tests.RemoveTempDir(tempDir) outputBuffer := tests.NewFakeFile() - out.SetErrFile(outputBuffer) + out.SetOutFile(outputBuffer) var tc = []struct { len int diff --git a/pkg/minikube/out/out.go b/pkg/minikube/out/out.go index ea54b5e7be..669a68e157 100644 --- a/pkg/minikube/out/out.go +++ b/pkg/minikube/out/out.go @@ -18,7 +18,10 @@ limitations under the License. 
package out import ( + "bytes" "fmt" + "html" + "html/template" "io" "os" "strconv" @@ -27,6 +30,7 @@ import ( "github.com/golang/glog" isatty "github.com/mattn/go-isatty" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/translate" ) @@ -35,7 +39,7 @@ import ( // // out.SetOutFile(os.Stdout) // out.String("Starting up!") -// out.T(out.StatusChange, "Configuring things") +// out.T(style.StatusChange, "Configuring things") // out.SetErrFile(os.Stderr) // out.Fatal("Oh no, everything failed.") @@ -68,12 +72,12 @@ type fdWriter interface { type V map[string]interface{} // T writes a stylized and templated message to stdout -func T(style StyleEnum, format string, a ...V) { - if style == Option { +func T(st style.Enum, format string, a ...V) { + if st == style.Option { Infof(format, a...) return } - outStyled := ApplyTemplateFormatting(style, useColor, format, a...) + outStyled := stylized(st, useColor, format, a...) if JSON { register.PrintStep(outStyled) return @@ -84,7 +88,7 @@ func T(style StyleEnum, format string, a ...V) { // Infof is used for informational logs (options, env variables, etc) func Infof(format string, a ...V) { - outStyled := ApplyTemplateFormatting(Option, useColor, format, a...) + outStyled := stylized(style.Option, useColor, format, a...) if JSON { register.PrintInfo(outStyled) return @@ -119,19 +123,9 @@ func Ln(format string, a ...interface{}) { String(format+"\n", a...) } -// ErrWithExitCode includes the exit code in JSON output -func ErrWithExitCode(style StyleEnum, format string, exitcode int, a ...V) { - if JSON { - errStyled := ApplyTemplateFormatting(style, useColor, format, a...) - register.PrintErrorExitCode(errStyled, exitcode) - return - } - ErrT(style, format, a...) -} - // ErrT writes a stylized and templated error message to stderr -func ErrT(style StyleEnum, format string, a ...V) { - errStyled := ApplyTemplateFormatting(style, useColor, format, a...) 
+func ErrT(st style.Enum, format string, a ...V) { + errStyled := stylized(st, useColor, format, a...) Err(errStyled) } @@ -163,26 +157,26 @@ func ErrLn(format string, a ...interface{}) { // SuccessT is a shortcut for writing a templated success message to stdout func SuccessT(format string, a ...V) { - T(SuccessType, format, a...) + T(style.Success, format, a...) } // FatalT is a shortcut for writing a templated fatal message to stderr func FatalT(format string, a ...V) { - ErrT(FatalType, format, a...) + ErrT(style.Fatal, format, a...) } // WarningT is a shortcut for writing a templated warning message to stderr func WarningT(format string, a ...V) { if JSON { - register.PrintWarning(ApplyTemplateFormatting(Warning, useColor, format, a...)) + register.PrintWarning(stylized(style.Warning, useColor, format, a...)) return } - ErrT(Warning, format, a...) + ErrT(style.Warning, format, a...) } // FailureT is a shortcut for writing a templated failure message to stderr func FailureT(format string, a ...V) { - ErrT(FailureType, format, a...) + ErrT(style.Failure, format, a...) } // SetOutFile configures which writer standard output goes to. 
@@ -225,6 +219,11 @@ func wantsColor(fd uintptr) bool { } } + // New Windows Terminal + if os.Getenv("WT_SESSION") != "" { + return true + } + term := os.Getenv("TERM") colorTerm := os.Getenv("COLORTERM") // Example: term-256color @@ -243,12 +242,12 @@ func LogEntries(msg string, err error, entries map[string][]string) { DisplayError(msg, err) for name, lines := range entries { - T(FailureType, "Problems detected in {{.entry}}:", V{"entry": name}) + T(style.Failure, "Problems detected in {{.entry}}:", V{"entry": name}) if len(lines) > MaxLogEntries { lines = lines[:MaxLogEntries] } for _, l := range lines { - T(LogEntry, l) + T(style.LogEntry, l) } } } @@ -261,9 +260,46 @@ func DisplayError(msg string, err error) { return } // use Warning because Error will display a duplicate message to stderr - ErrT(Empty, "") + ErrT(style.Empty, "") FatalT("{{.msg}}: {{.err}}", V{"msg": translate.T(msg), "err": err}) - ErrT(Empty, "") - ErrT(Sad, "minikube is exiting due to an error. If the above message is not useful, open an issue:") - ErrT(URL, "https://github.com/kubernetes/minikube/issues/new/choose") + ErrT(style.Empty, "") + ErrT(style.Sad, "minikube is exiting due to an error. 
If the above message is not useful, open an issue:") + ErrT(style.URL, "https://github.com/kubernetes/minikube/issues/new/choose") +} + +// applyTmpl applies formatting +func applyTmpl(format string, a ...V) string { + if len(a) == 0 { + glog.Warningf("no arguments passed for %q - returning raw string", format) + return format + } + + var buf bytes.Buffer + t, err := template.New(format).Parse(format) + if err != nil { + glog.Errorf("unable to parse %q: %v - returning raw string.", format, err) + return format + } + err = t.Execute(&buf, a[0]) + if err != nil { + glog.Errorf("unable to execute %s: %v - returning raw string.", format, err) + return format + } + out := buf.String() + + // Return quotes back to normal + out = html.UnescapeString(out) + + // escape any outstanding '%' signs so that they don't get interpreted + // as a formatting directive down the line + out = strings.Replace(out, "%", "%%", -1) + // avoid doubling up in case this function is called multiple times + out = strings.Replace(out, "%%%%", "%%", -1) + return out +} + +// Fmt applies formatting and translation +func Fmt(format string, a ...V) string { + format = translate.T(format) + return applyTmpl(format, a...) } diff --git a/pkg/minikube/out/out_reason.go b/pkg/minikube/out/out_reason.go new file mode 100644 index 0000000000..3907e2107e --- /dev/null +++ b/pkg/minikube/out/out_reason.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Copyright 2019 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package out provides a mechanism for sending localized, stylized output to the console.
+package out
+
+import (
+	"strings"
+
+	"k8s.io/minikube/pkg/minikube/out/register"
+	"k8s.io/minikube/pkg/minikube/reason"
+	"k8s.io/minikube/pkg/minikube/style"
+)
+
+// Error shows an error reason
+func Error(k reason.Kind, format string, a ...V) {
+	if JSON {
+		msg := Fmt(format, a...)
+		register.PrintErrorExitCode(strings.TrimSpace(msg), k.ExitCode, map[string]string{
+			"name":   k.ID,
+			"advice": k.Advice,
+			"url":    k.URL,
+			"issues": strings.Join(k.IssueURLs(), ","),
+		})
+	} else {
+		displayText(k, format, a...)
+ } +} + +// indentMultiLine indents a message if it contains multiple lines +func indentMultiLine(s string) string { + if !strings.Contains(s, "\n") { + return s + } + + cleaned := []string{"\n"} + for _, sn := range strings.Split(s, "\n") { + cleaned = append(cleaned, style.Indented+strings.TrimSpace(sn)) + } + return strings.Join(cleaned, "\n") +} + +func displayText(k reason.Kind, format string, a ...V) { + Ln("") + st := k.Style + + if st == style.None { + st = style.KnownIssue + } + + ErrT(st, format, a...) + + if k.Advice != "" { + + advice := indentMultiLine(Fmt(k.Advice, a...)) + ErrT(style.Tip, Fmt("Suggestion: {{.advice}}", V{"advice": advice})) + } + + if k.URL != "" { + ErrT(style.Documentation, "Documentation: {{.url}}", V{"url": k.URL}) + } + + issueURLs := k.IssueURLs() + if len(issueURLs) == 1 { + ErrT(style.Issues, "Related issue: {{.url}}", V{"url": issueURLs[0]}) + } + + if len(issueURLs) > 1 { + ErrT(style.Issues, "Related issues:") + for _, i := range issueURLs { + ErrT(style.Issue, "{{.url}}", V{"url": i}) + } + } + + if k.NewIssueLink { + ErrT(style.Empty, "") + ErrT(style.Sad, "If the above advice does not help, please let us know: ") + ErrT(style.URL, "https://github.com/kubernetes/minikube/issues/new/choose") + } + Ln("") +} diff --git a/pkg/minikube/out/out_reason_test.go b/pkg/minikube/out/out_reason_test.go new file mode 100644 index 0000000000..1a327d3a7c --- /dev/null +++ b/pkg/minikube/out/out_reason_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package out + +import ( + "bytes" + "os" + "strings" + "testing" + + "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" +) + +type buffFd struct { + bytes.Buffer + uptr uintptr +} + +func (b buffFd) Fd() uintptr { return b.uptr } + +func TestDisplayProblem(t *testing.T) { + buffErr := buffFd{} + SetErrFile(&buffErr) + tests := []struct { + description string + issue reason.Kind + expected string + }{ + { + issue: reason.Kind{ + ID: "example", + URL: "example.com", + }, + description: "url, id and err", + expected: `X Something failed +* Documentation: example.com +`, + }, + { + issue: reason.Kind{ID: "example", URL: "example.com", Issues: []int{0, 1}, Advice: "you need a hug"}, + description: "with 2 issues and suggestion", + expected: `X Something failed +* Suggestion: you need a hug +* Documentation: example.com +* Related issues: + - https://github.com/kubernetes/minikube/issues/0 + - https://github.com/kubernetes/minikube/issues/1 +`, + }, + { + issue: reason.Kind{ID: "example", URL: "example.com", Issues: []int{0, 1}}, + description: "with 2 issues", + expected: `X Something failed +* Documentation: example.com +* Related issues: + - https://github.com/kubernetes/minikube/issues/0 + - https://github.com/kubernetes/minikube/issues/1 +`, + }, + } + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + buffErr.Truncate(0) + JSON = false + Error(tc.issue, "Something failed") + errStr := buffErr.String() + if strings.TrimSpace(errStr) != strings.TrimSpace(tc.expected) { + t.Fatalf("Expected errString:\n%v\ngot:\n%v\n", tc.expected, errStr) + } + }) + } +} + +func TestDisplayJSON(t *testing.T) { + defer SetJSON(false) + SetJSON(true) + + tcs := []struct { + k *reason.Kind + expected string + }{ + { + k: &reason.Kind{ + + ID: "BUG", + ExitCode: 4, + Advice: "fix me!", + Issues: []int{1, 2}, + URL: "url", + }, 
+ expected: `{"data":{"advice":"fix me!","exitcode":"4","issues":"https://github.com/kubernetes/minikube/issues/1,https://github.com/kubernetes/minikube/issues/2","message":"my error","name":"BUG","url":"url"},"datacontenttype":"application/json","id":"random-id","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.error"} +`, + }, + } + for _, tc := range tcs { + t.Run(tc.k.ID, func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + register.SetOutputFile(buf) + defer func() { register.SetOutputFile(os.Stdout) }() + + register.GetUUID = func() string { + return "random-id" + } + + JSON = true + Error(*tc.k, "my error") + actual := buf.String() + if actual != tc.expected { + t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", tc.expected, actual) + } + }) + } +} diff --git a/pkg/minikube/out/out_style.go b/pkg/minikube/out/out_style.go new file mode 100644 index 0000000000..33260e1798 --- /dev/null +++ b/pkg/minikube/out/out_style.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package out provides a mechanism for sending localized, stylized output to the console. 
+package out + +import ( + "k8s.io/minikube/pkg/minikube/style" + "k8s.io/minikube/pkg/minikube/translate" +) + +// Add a prefix to a string +func applyPrefix(prefix, format string) string { + if prefix == "" { + return format + } + return prefix + format +} + +// applyStyle translates the given string if necessary then adds any appropriate style prefix. +func applyStyle(st style.Enum, useColor bool, format string) string { + format = translate.T(format) + + s, ok := style.Config[st] + if !s.OmitNewline { + format += "\n" + } + + // Similar to CSS styles, if no style matches, output an unformatted string. + if !ok || JSON { + return format + } + + if !useColor { + return applyPrefix(style.LowPrefix(s), format) + } + return applyPrefix(s.Prefix, format) +} + +// stylized applies formatting to the provided template +func stylized(st style.Enum, useColor bool, format string, a ...V) string { + if a == nil { + a = []V{{}} + } + format = applyStyle(st, useColor, format) + return Fmt(format, a...) 
+} diff --git a/pkg/minikube/out/style_test.go b/pkg/minikube/out/out_style_test.go similarity index 66% rename from pkg/minikube/out/style_test.go rename to pkg/minikube/out/out_style_test.go index 65f63c59b2..fb231d333a 100644 --- a/pkg/minikube/out/style_test.go +++ b/pkg/minikube/out/out_style_test.go @@ -20,11 +20,12 @@ import ( "fmt" "strings" "testing" + + "k8s.io/minikube/pkg/minikube/style" ) func TestApplyPrefix(t *testing.T) { - - var tests = []struct { + tests := []struct { prefix, format, expected, description string }{ { @@ -50,56 +51,18 @@ func TestApplyPrefix(t *testing.T) { } } -func TestLowPrefix(t *testing.T) { - - var tests = []struct { - expected string - description string - style style - }{ - { - expected: lowBullet, - description: "empty prefix", - }, - { - expected: "bar", - style: style{LowPrefix: "bar"}, - description: "lowPrefix", - }, - { - expected: lowBullet, - style: style{Prefix: "foo"}, - description: "prefix without spaces", - }, - { - expected: lowIndent, - style: style{Prefix: " foo"}, - description: "prefix with spaces", - }, - } - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - got := lowPrefix(test.style) - if got != test.expected { - t.Errorf("Expected %v but got %v", test.expected, got) - } - }) - } -} - func TestApplyStyle(t *testing.T) { - - var tests = []struct { + tests := []struct { expected string description string - styleEnum StyleEnum + styleEnum style.Enum format string useColor bool }{ { - expected: fmt.Sprintf("%sbar", lowBullet), + expected: fmt.Sprintf("%sbar", style.LowBullet), description: "format bar, empty style, color off", - styleEnum: Empty, + styleEnum: style.Empty, useColor: false, format: "bar", }, @@ -111,9 +74,9 @@ func TestApplyStyle(t *testing.T) { format: "bar", }, { - expected: fmt.Sprintf("%sfoo", styles[Ready].Prefix), + expected: fmt.Sprintf("%sfoo", style.Config[style.Ready].Prefix), description: "format foo, ready style, color on", - styleEnum: Ready, + 
styleEnum: style.Ready, useColor: true, format: "foo", }, @@ -130,19 +93,18 @@ func TestApplyStyle(t *testing.T) { } func TestApplyTemplateFormating(t *testing.T) { - - var tests = []struct { + tests := []struct { expected string description string - styleEnum StyleEnum + styleEnum style.Enum format string useColor bool a []V }{ { - expected: fmt.Sprintf("%sbar", lowBullet), + expected: fmt.Sprintf("%sbar", style.LowBullet), description: "format bar, empty style, color off", - styleEnum: Empty, + styleEnum: style.Empty, useColor: false, format: "bar", }, @@ -154,30 +116,30 @@ func TestApplyTemplateFormating(t *testing.T) { format: "bar", }, { - expected: fmt.Sprintf("%sfoo", styles[Ready].Prefix), + expected: fmt.Sprintf("%sfoo", style.Config[style.Ready].Prefix), description: "format foo, ready style, color on, a nil", - styleEnum: Ready, + styleEnum: style.Ready, useColor: true, format: "foo", }, { - expected: fmt.Sprintf("%sfoo", styles[Ready].Prefix), + expected: fmt.Sprintf("%sfoo", style.Config[style.Ready].Prefix), description: "format foo, ready style, color on", - styleEnum: Ready, + styleEnum: style.Ready, useColor: true, format: "foo", }, { - expected: fmt.Sprintf("%s{{ a }}", styles[Ready].Prefix), + expected: fmt.Sprintf("%s{{ a }}", style.Config[style.Ready].Prefix), description: "bad format", - styleEnum: Ready, + styleEnum: style.Ready, useColor: true, format: "{{ a }}", }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - rawGot := ApplyTemplateFormatting(test.styleEnum, test.useColor, test.format, test.a...) + rawGot := stylized(test.styleEnum, test.useColor, test.format, test.a...) 
got := strings.TrimSpace(rawGot) if got != test.expected { t.Errorf("Expected '%v' but got '%v'", test.expected, got) diff --git a/pkg/minikube/out/out_test.go b/pkg/minikube/out/out_test.go index dee4e4ca51..8fbc616b92 100644 --- a/pkg/minikube/out/out_test.go +++ b/pkg/minikube/out/out_test.go @@ -22,6 +22,7 @@ import ( "strconv" "testing" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/minikube/tests" "k8s.io/minikube/pkg/minikube/translate" ) @@ -35,20 +36,20 @@ func TestOutT(t *testing.T) { "Installing Kubernetes version {{.version}} ...": "... {{.version}} ΨͺثبيΨͺ Kubernetes Ψ§Ω„Ψ₯Ψ΅Ψ―Ψ§Ψ±", } - var testCases = []struct { - style StyleEnum + testCases := []struct { + style style.Enum message string params V want string wantASCII string }{ - {Happy, "Happy", nil, "πŸ˜„ Happy\n", "* Happy\n"}, - {Option, "Option", nil, " β–ͺ Option\n", " - Option\n"}, - {Warning, "Warning", nil, "❗ Warning\n", "! Warning\n"}, - {FatalType, "Fatal: {{.error}}", V{"error": "ugh"}, "πŸ’£ Fatal: ugh\n", "X Fatal: ugh\n"}, - {Issue, "http://i/{{.number}}", V{"number": 10000}, " β–ͺ http://i/10000\n", " - http://i/10000\n"}, - {Usage, "raw: {{.one}} {{.two}}", V{"one": "'%'", "two": "%d"}, "πŸ’‘ raw: '%' %d\n", "* raw: '%' %d\n"}, - {Running, "Installing Kubernetes version {{.version}} ...", V{"version": "v1.13"}, "πŸƒ ... v1.13 ΨͺثبيΨͺ Kubernetes Ψ§Ω„Ψ₯Ψ΅Ψ―Ψ§Ψ±\n", "* ... v1.13 ΨͺثبيΨͺ Kubernetes Ψ§Ω„Ψ₯Ψ΅Ψ―Ψ§Ψ±\n"}, + {style.Happy, "Happy", nil, "πŸ˜„ Happy\n", "* Happy\n"}, + {style.Option, "Option", nil, " β–ͺ Option\n", " - Option\n"}, + {style.Warning, "Warning", nil, "❗ Warning\n", "! 
Warning\n"}, + {style.Fatal, "Fatal: {{.error}}", V{"error": "ugh"}, "πŸ’£ Fatal: ugh\n", "X Fatal: ugh\n"}, + {style.Issue, "http://i/{{.number}}", V{"number": 10000}, " β–ͺ http://i/10000\n", " - http://i/10000\n"}, + {style.Usage, "raw: {{.one}} {{.two}}", V{"one": "'%'", "two": "%d"}, "πŸ’‘ raw: '%' %d\n", "* raw: '%' %d\n"}, + {style.Running, "Installing Kubernetes version {{.version}} ...", V{"version": "v1.13"}, "πŸƒ ... v1.13 ΨͺثبيΨͺ Kubernetes Ψ§Ω„Ψ₯Ψ΅Ψ―Ψ§Ψ±\n", "* ... v1.13 ΨͺثبيΨͺ Kubernetes Ψ§Ω„Ψ₯Ψ΅Ψ―Ψ§Ψ±\n"}, } for _, tc := range testCases { for _, override := range []bool{true, false} { @@ -74,7 +75,7 @@ func TestOutT(t *testing.T) { func TestOut(t *testing.T) { os.Setenv(OverrideEnv, "") - var testCases = []struct { + testCases := []struct { format string arg interface{} want string diff --git a/pkg/minikube/out/register/cloud_events.go b/pkg/minikube/out/register/cloud_events.go index d654e4ba78..fe537e17c2 100644 --- a/pkg/minikube/out/register/cloud_events.go +++ b/pkg/minikube/out/register/cloud_events.go @@ -46,7 +46,7 @@ func SetOutputFile(w io.Writer) { // SetEventLogPath sets the path of an event log file func SetEventLogPath(path string) { if _, err := os.Stat(filepath.Dir(path)); err != nil { - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil { glog.Errorf("Error creating profile directory: %v", err) return } diff --git a/pkg/minikube/out/register/json_test.go b/pkg/minikube/out/register/json_test.go index f6035273fd..c1fb1c55ee 100644 --- a/pkg/minikube/out/register/json_test.go +++ b/pkg/minikube/out/register/json_test.go @@ -104,6 +104,7 @@ func TestErrorExitCode(t *testing.T) { t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual) } } + func TestWarning(t *testing.T) { expected := 
`{"data":{"message":"warning"},"datacontenttype":"application/json","id":"random-id","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.warning"}` expected += "\n" diff --git a/pkg/minikube/out/register/log.go b/pkg/minikube/out/register/log.go index 67b24885f5..d8106cd1a7 100644 --- a/pkg/minikube/out/register/log.go +++ b/pkg/minikube/out/register/log.go @@ -16,7 +16,10 @@ limitations under the License. package register -import "fmt" +import ( + "fmt" + "strings" +) // Log represents the different types of logs that can be output as JSON // This includes: Step, Download, DownloadProgress, Warning, Info, Error @@ -39,7 +42,7 @@ func NewStep(message string) *Step { return &Step{data: map[string]string{ "totalsteps": Reg.totalSteps(), "currentstep": Reg.currentStep(), - "message": message, + "message": strings.TrimSpace(message), "name": string(Reg.current), }} } @@ -92,7 +95,7 @@ type Warning struct { func NewWarning(warning string) *Warning { return &Warning{ map[string]string{ - "message": warning, + "message": strings.TrimSpace(warning), }, } } @@ -115,7 +118,7 @@ func (s *Info) Type() string { func NewInfo(message string) *Info { return &Info{ map[string]string{ - "message": message, + "message": strings.TrimSpace(message), }, } } @@ -128,18 +131,18 @@ type Error struct { func NewError(err string) *Error { return &Error{ map[string]string{ - "message": err, + "message": strings.TrimSpace(err), }, } } // NewErrorExitCode returns an error that has an associated exit code func NewErrorExitCode(err string, exitcode int, additionalData ...map[string]string) *Error { - e := NewError(err) + e := NewError(strings.TrimSpace(err)) e.data["exitcode"] = fmt.Sprintf("%v", exitcode) for _, a := range additionalData { for k, v := range a { - e.data[k] = v + e.data[k] = strings.TrimSpace(v) } } return e diff --git a/pkg/minikube/perf/binary.go b/pkg/minikube/perf/binary.go index cfafacc7da..83644e9a32 100644 --- 
a/pkg/minikube/perf/binary.go +++ b/pkg/minikube/perf/binary.go @@ -17,15 +17,17 @@ limitations under the License. package perf import ( + "context" "fmt" "io" - "net/http" "os" "path/filepath" "strconv" "strings" + "cloud.google.com/go/storage" "github.com/pkg/errors" + "google.golang.org/api/option" "k8s.io/minikube/pkg/minikube/constants" ) @@ -37,6 +39,7 @@ type Binary struct { const ( prPrefix = "pr://" + bucket = "minikube-builds" ) // NewBinary returns a new binary type @@ -58,6 +61,33 @@ func (b *Binary) Name() string { return filepath.Base(b.path) } +func (b *Binary) download() error { + ctx := context.Background() + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + if err != nil { + return errors.Wrap(err, "getting storage client") + } + defer client.Close() + rc, err := client.Bucket(bucket).Object(fmt.Sprintf("%d/minikube-linux-amd64", b.pr)).NewReader(ctx) + if err != nil { + return errors.Wrap(err, "getting minikube object from gcs bucket") + } + defer rc.Close() + + if err := os.MkdirAll(filepath.Dir(b.path), 0777); err != nil { + return err + } + + f, err := os.OpenFile(b.path, os.O_CREATE|os.O_RDWR, 0777) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, rc) + return err +} + // newBinaryFromPR downloads the minikube binary built for the pr by Jenkins from GCS func newBinaryFromPR(pr string) (*Binary, error) { pr = strings.TrimPrefix(pr, prPrefix) @@ -71,39 +101,12 @@ func newBinaryFromPR(pr string) (*Binary, error) { path: localMinikubePath(i), pr: i, } - - if err := downloadBinary(remoteMinikubeURL(i), b.path); err != nil { - return nil, errors.Wrapf(err, "downloading minikube") + if err := b.download(); err != nil { + return nil, errors.Wrapf(err, "downloading binary") } - return b, nil } -func remoteMinikubeURL(pr int) string { - return fmt.Sprintf("https://storage.googleapis.com/minikube-builds/%d/minikube-linux-amd64", pr) -} - func localMinikubePath(pr int) string { return 
fmt.Sprintf("%s/minikube-binaries/%d/minikube", constants.DefaultMinipath, pr) } - -func downloadBinary(url, path string) error { - resp, err := http.Get(url) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { - return err - } - - f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0777) - if err != nil { - return err - } - defer f.Close() - - _, err = io.Copy(f, resp.Body) - return err -} diff --git a/pkg/minikube/perf/binary_test.go b/pkg/minikube/perf/binary_test.go index e068a1d7e1..8a6c651f67 100644 --- a/pkg/minikube/perf/binary_test.go +++ b/pkg/minikube/perf/binary_test.go @@ -52,10 +52,6 @@ func TestNewBinary(t *testing.T) { input, prNum string errExpected bool }{ - { - input: prPrefix + "42", - prNum: "42", - }, { input: "42", prNum: "42", diff --git a/pkg/minikube/perf/start.go b/pkg/minikube/perf/start.go index e41547f31c..3a9c62bf9b 100644 --- a/pkg/minikube/perf/start.go +++ b/pkg/minikube/perf/start.go @@ -36,9 +36,13 @@ const ( func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary) error { drivers := []string{"kvm2", "docker"} for _, d := range drivers { + fmt.Printf("**%s Driver**\n", d) + if err := downloadArtifacts(ctx, binaries, d); err != nil { + fmt.Printf("error downloading artifacts: %v", err) + continue + } rm, err := collectResults(ctx, binaries, d) if err != nil { - fmt.Printf("**%s Driver**\n", d) fmt.Printf("error collecting results for %s driver: %v\n", d, err) continue } @@ -58,6 +62,11 @@ func collectResults(ctx context.Context, binaries []*Binary, driver string) (*re return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name()) } rm.addResult(binary, r) + r, err = timeEnableIngress(ctx, binary) + if err != nil { + return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name()) + } + rm.addResult(binary, r) } } return rm, nil @@ -71,19 +80,23 @@ func average(nums []float64) float64 { return total / 
float64(len(nums)) } +func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string) error { + for _, b := range binaries { + c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), "--download-only") + c.Stderr = os.Stderr + log.Printf("Running: %v...", c.Args) + if err := c.Run(); err != nil { + return errors.Wrap(err, "downloading artifacts") + } + } + return nil +} + // timeMinikubeStart returns the time it takes to execute `minikube start` -// It deletes the VM after `minikube start`. func timeMinikubeStart(ctx context.Context, binary *Binary, driver string) (*result, error) { startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver)) startCmd.Stderr = os.Stderr - deleteCmd := exec.CommandContext(ctx, binary.path, "delete") - defer func() { - if err := deleteCmd.Run(); err != nil { - log.Printf("error deleting minikube: %v", err) - } - }() - log.Printf("Running: %v...", startCmd.Args) r, err := timeCommandLogs(startCmd) if err != nil { @@ -91,3 +104,24 @@ func timeMinikubeStart(ctx context.Context, binary *Binary, driver string) (*res } return r, nil } + +// timeEnableIngress returns the time it takes to execute `minikube addons enable ingress` +// It deletes the VM after `minikube addons enable ingress`. 
+func timeEnableIngress(ctx context.Context, binary *Binary) (*result, error) { + enableCmd := exec.CommandContext(ctx, binary.path, "addons", "enable", "ingress") + enableCmd.Stderr = os.Stderr + + deleteCmd := exec.CommandContext(ctx, binary.path, "delete") + defer func() { + if err := deleteCmd.Run(); err != nil { + log.Printf("error deleting minikube: %v", err) + } + }() + + log.Printf("Running: %v...", enableCmd.Args) + r, err := timeCommandLogs(enableCmd) + if err != nil { + return nil, errors.Wrapf(err, "timing cmd: %v", enableCmd.Args) + } + return r, nil +} diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go deleted file mode 100644 index 4dec02bd31..0000000000 --- a/pkg/minikube/problem/err_map.go +++ /dev/null @@ -1,595 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package problem - -import "regexp" - -// re is a shortcut around regexp.MustCompile -func re(s string) *regexp.Regexp { - return regexp.MustCompile(s) -} - -// vmProblems are VM related problems -var vmProblems = map[string]match{ - // Generic VM driver - "DRIVER_CORRUPT": { - Regexp: re(`Error attempting to get plugin server address for RPC`), - Advice: "The VM driver exited with an error, and may be corrupt.
Run 'minikube start' with --alsologtostderr -v=8 to see the error", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/", - ShowIssueLink: true, - }, - "DRIVER_EXITED": { - Regexp: re(`Unable to start VM: start: exit status 1`), - Advice: "The VM driver crashed. Run 'minikube start --alsologtostderr -v=8' to see the VM driver error message", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/#troubleshooting", - ShowIssueLink: true, - }, - "DRIVER_NOT_FOUND": { - Regexp: re(`registry: driver not found`), - Advice: "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.", - Issues: []int{5295}, - }, - "DRIVER_MISSING_ADDRESS": { - Regexp: re(`new host: dial tcp: missing address`), - Advice: "The machine-driver specified is failing to start. Try running 'docker-machine-driver- version'", - Issues: []int{6023, 4679}, - ShowIssueLink: true, - }, - "PRECREATE_EXIT_1": { - Regexp: re(`precreate: exit status 1`), - Advice: "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code", - Issues: []int{6098}, - ShowIssueLink: true, - }, - "FILE_IN_USE": { - Regexp: re(`The process cannot access the file because it is being used by another process`), - Advice: "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager", - URL: "https://docs.docker.com/machine/drivers/hyper-v/", - GOOS: []string{"windows"}, - Issues: []int{7300}, - }, - "CREATE_TIMEOUT": { - Regexp: re(`create host timed out in \d`), - Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", - Issues: []int{7072}, - }, - "IMAGE_ARCH": { - Regexp: re(`Error: incompatible image architecture`), - Advice: "This driver does not yet work on your architecture. 
Maybe try --driver=none", - GOOS: []string{"linux"}, - Issues: []int{7071}, - }, - // Docker - "DOCKER_WSL2_MOUNT": { - Regexp: re(`cannot find cgroup mount destination: unknown`), - Advice: "Run: 'sudo mkdir /sys/fs/cgroup/systemd && sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'", - URL: "https://github.com/microsoft/WSL/issues/4189", - Issues: []int{5392}, - GOOS: []string{"linux"}, - }, - "DOCKER_READONLY": { - Regexp: re(`mkdir /var/lib/docker/volumes.*: read-only file system`), - Advice: "Restart Docker", - Issues: []int{6825}, - }, - "DOCKER_CHROMEOS": { - Regexp: re(`Container.*is not running.*chown docker:docker`), - Advice: "minikube is not yet compatible with ChromeOS", - Issues: []int{6411}, - }, - "DOCKER_PROVISION_STUCK_CONTAINER": { - Regexp: re(`executing "" at `), - Advice: "Restart Docker, Ensure docker is running and then run: 'minikube delete' and then 'minikube start' again", - URL: "https://github.com/kubernetes/minikube/issues/8163#issuecomment-652627436", - Issues: []int{8163}, - }, - // Hyperkit - "HYPERKIT_NO_IP": { - Regexp: re(`IP address never found in dhcp leases file Temporary Error: Could not find an IP address for`), - Advice: "Install the latest hyperkit binary, and run 'minikube delete'", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/", - Issues: []int{1926, 4206}, - GOOS: []string{"darwin"}, - }, - "HYPERKIT_NOT_FOUND": { - Regexp: re(`Driver "hyperkit" not found.`), - Advice: "Please install the minikube hyperkit VM driver, or select an alternative --driver", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/", - GOOS: []string{"darwin"}, - }, - "HYPERKIT_VMNET_FRAMEWORK": { - Regexp: re(`error from vmnet.framework: -1`), - Advice: "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. 
Alternatively, you may choose an alternate --driver", - Issues: []int{6028, 5594}, - GOOS: []string{"darwin"}, - }, - "HYPERKIT_CRASHED": { - Regexp: re(`hyperkit crashed!`), - Advice: "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver", - Issues: []int{6079, 5780}, - GOOS: []string{"darwin"}, - }, - // Hyper-V - "HYPERV_NO_VSWITCH": { - Regexp: re(`no External vswitch found. A valid vswitch must be available for this command to run.`), - Advice: "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=` to `minikube start`", - URL: "https://docs.docker.com/machine/drivers/hyper-v/", - GOOS: []string{"windows"}, - }, - "HYPERV_VSWITCH_NOT_FOUND": { - Regexp: re(`precreate: vswitch.*not found`), - Advice: "Confirm that you have supplied the correct value to --hyperv-virtual-switch using the 'Get-VMSwitch' command", - URL: "https://docs.docker.com/machine/drivers/hyper-v/", - GOOS: []string{"windows"}, - }, - "HYPERV_POWERSHELL_NOT_FOUND": { - Regexp: re(`Powershell was not found in the path`), - Advice: "To start minikube with Hyper-V, Powershell must be in your PATH`", - URL: "https://docs.docker.com/machine/drivers/hyper-v/", - GOOS: []string{"windows"}, - }, - "HYPERV_AS_ADMIN": { - Regexp: re(`Hyper-v commands have to be run as an Administrator`), - Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", - URL: "https://rominirani.com/docker-machine-windows-10-hyper-v-troubleshooting-tips-367c1ea73c24", - Issues: []int{4511}, - GOOS: []string{"windows"}, - }, - "HYPERV_NEEDS_ESC": { - Regexp: re(`The requested operation requires elevation.`), - Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", - Issues: []int{7347}, - GOOS: []string{"windows"}, - }, - "HYPERV_FILE_DELETE_FAILURE": { - Regexp: re(`Unable to 
remove machine directory`), - Advice: "You may need to stop the Hyper-V Manager and run `minikube delete` again.", - Issues: []int{6804}, - GOOS: []string{"windows"}, - }, - // KVM - "KVM2_NOT_FOUND": { - Regexp: re(`Driver "kvm2" not found. Do you have the plugin binary .* accessible in your PATH`), - Advice: "Please install the minikube kvm2 VM driver, or select an alternative --driver", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", - GOOS: []string{"linux"}, - }, - "KVM2_NO_DOMAIN": { - Regexp: re(`no domain with matching name`), - Advice: "The VM that minikube is configured for no longer exists. Run 'minikube delete'", - Issues: []int{3636}, - GOOS: []string{"linux"}, - }, - "KVM_CREATE_CONFLICT": { - Regexp: re(`KVM_CREATE_VM.* failed:.* Device or resource busy`), - Advice: "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.", - Issues: []int{4913}, - GOOS: []string{"linux"}, - }, - "KVM2_RESTART_NO_IP": { - Regexp: re(`Error starting stopped host: Machine didn't return an IP after \d+ seconds`), - Advice: "The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.", - Issues: []int{3901, 3434}, - }, - "KVM2_START_NO_IP": { - Regexp: re(`Error in driver during machine creation: Machine didn't return an IP after \d+ seconds`), - Advice: "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. 
If you are running minikube within a VM, consider using --driver=none", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", - Issues: []int{4249, 3566}, - GOOS: []string{"linux"}, - }, - "KVM2_NETWORK_DEFINE_XML": { - Regexp: re(`not supported by the connection driver: virNetworkDefineXML`), - Advice: "Rebuild libvirt with virt-network support", - URL: "https://forums.gentoo.org/viewtopic-t-981692-start-0.html", - Issues: []int{4195}, - GOOS: []string{"linux"}, - }, - "KVM2_FAILED_MSR": { - Regexp: re(`qemu unexpectedly closed the monitor.*failed to set MSR`), - Advice: "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.", - Issues: []int{4277}, - GOOS: []string{"linux"}, - }, - "KVM_UNAVAILABLE": { - Regexp: re(`invalid argument: could not find capabilities for domaintype=kvm`), - Advice: "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem", - URL: "http://mikko.repolainen.fi/documents/virtualization-with-kvm", - Issues: []int{2991}, - GOOS: []string{"linux"}, - }, - "KVM_CONNECTION_ERROR": { - Regexp: re(`error connecting to libvirt socket`), - Advice: "Have you set up libvirt correctly?", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", - GOOS: []string{"linux"}, - }, - "KVM_ISO_PERMISSION": { - Regexp: re(`boot2docker.iso.*Permission denied`), - Advice: "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory", - GOOS: []string{"linux"}, - Issues: []int{5950}, - }, - "KVM_OOM": { - Regexp: re(`cannot set up guest memory.*Cannot allocate memory`), - Advice: "Choose a smaller value for --memory, such as 2000", - GOOS: []string{"linux"}, - Issues: []int{6366}, - }, - // None - "NONE_APISERVER_MISSING": { - Regexp: re(`apiserver process never appeared`), - Advice: "Check that SELinux is disabled, and that the provided apiserver flags are valid", - Issues: 
[]int{6014, 4536}, - GOOS: []string{"linux"}, - }, - "NONE_DOCKER_EXIT_1": { - Regexp: re(`sudo systemctl start docker: exit status 1`), - Advice: "Either systemctl is not installed, or Docker is broken. Run 'sudo systemctl start docker' and 'journalctl -u docker'", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", - Issues: []int{4498}, - GOOS: []string{"linux"}, - }, - "NONE_DOCKER_EXIT_5": { - Regexp: re(`sudo systemctl start docker: exit status 5`), - Advice: "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. Alternatively, select another value for --driver", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", - Issues: []int{5532}, - GOOS: []string{"linux"}, - }, - "NONE_CRIO_EXIT_5": { - Regexp: re(`sudo systemctl restart crio: exit status 5`), - Advice: "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", - Issues: []int{5532}, - GOOS: []string{"linux"}, - }, - "NONE_PORT_IN_USE": { - Regexp: re(`ERROR Port-.*is in use`), - Advice: "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. 
Run lsof -p to find the process and kill it", - Issues: []int{5484}, - GOOS: []string{"linux"}, - }, - "NONE_KUBELET": { - Regexp: re(`The kubelet is not running`), - Advice: "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start", - Issues: []int{4172}, - GOOS: []string{"linux"}, - }, - "NONE_DEFAULT_ROUTE": { - Regexp: re(`(No|from) default routes`), - Advice: "Configure a default route on this Linux host, or use another --driver that does not require it", - Issues: []int{6083, 5636}, - GOOS: []string{"linux"}, - }, - // VirtualBox - "VBOX_BLOCKED": { - Regexp: re(`NS_ERROR_FAILURE.*0x80004005`), - Advice: "Reinstall VirtualBox and verify that it is not blocked: System Preferences -> Security & Privacy -> General -> Some system software was blocked from loading", - Issues: []int{4107}, - GOOS: []string{"darwin"}, - }, - "VBOX_DRV_NOT_LOADED": { - Regexp: re(`vboxdrv kernel module is not loaded`), - Advice: "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", - - Issues: []int{4043, 4711}, - }, - "VBOX_DEVICE_MISSING": { - Regexp: re(`vboxdrv does not exist`), - Advice: "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", - Issues: []int{3974}, - }, - "VBOX_HARDENING": { - Regexp: re(`terminated unexpectedly.*VBoxHardening`), - Advice: "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.", - Issues: []int{3859, 3910}, - URL: "https://forums.virtualbox.org/viewtopic.php?f=25&t=82106", - GOOS: []string{"windows"}, - }, - "VBOX_NS_ERRROR": { - Regexp: re(`terminated unexpectedly.*NS_ERROR_FAILURE.*0x80004005`), - Advice: "VirtualBox is broken. 
Reinstall VirtualBox, reboot, and run 'minikube delete'.", - Issues: []int{5227}, - GOOS: []string{"linux"}, - }, - "VBOX_HOST_ADAPTER": { - Regexp: re(`The host-only adapter we just created is not visible`), - Advice: "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor", - Issues: []int{3614, 4222, 5817}, - URL: "https://stackoverflow.com/questions/52277019/how-to-fix-vm-issue-with-minikube-start", - }, - "VBOX_IP_CONFLICT": { - Regexp: re(`VirtualBox is configured with multiple host-only adapters with the same IP`), - Advice: "Use VirtualBox to remove the conflicting VM and/or network interfaces", - URL: "https://stackoverflow.com/questions/55573426/virtualbox-is-configured-with-multiple-host-only-adapters-with-the-same-ip-whe", - Issues: []int{3584}, - }, - "VBOX_HYPERV_64_BOOT": { - Regexp: re(`VirtualBox won't boot a 64bits VM when Hyper-V is activated`), - Advice: "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'", - Issues: []int{4051, 4783}, - }, - "VBOX_HYPERV_NEM_VM": { - Regexp: re(`vrc=VERR_NEM_VM_CREATE_FAILED`), - Advice: "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'", - Issues: []int{4587}, - }, - "VBOX_NOT_FOUND": { - Regexp: re(`VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path`), - Advice: "Install VirtualBox and ensure it is in the path, or select an alternative value for --driver", - URL: "https://minikube.sigs.k8s.io/docs/start/", - Issues: []int{3784}, - }, - "VBOX_NO_VM": { - Regexp: re(`Could not find a registered machine named`), - Advice: "The VM that minikube is configured for no longer exists. 
Run 'minikube delete'", - Issues: []int{4694}, - }, - "VBOX_VTX_DISABLED": { - Regexp: re(`This computer doesn't have VT-X/AMD-v enabled`), - Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.", - Issues: []int{3900, 4730}, - }, - "VERR_VERR_VMX_DISABLED": { - Regexp: re(`VT-x is disabled.*VERR_VMX_MSR_ALL_VMX_DISABLED`), - Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.", - Issues: []int{5282, 5456}, - }, - "VBOX_VERR_VMX_NO_VMX": { - Regexp: re(`VT-x is not available.*VERR_VMX_NO_VMX`), - Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS", - Issues: []int{1994, 5326}, - }, - "VERR_SVM_DISABLED": { - Regexp: re(`VERR_SVM_DISABLED`), - Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS", - Issues: []int{7074}, - }, - "VBOX_HOST_NETWORK": { - Regexp: re(`Error setting up host only network on machine start.*Unspecified error`), - Advice: "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. Try running 'minikube delete'", - Issues: []int{5260}, - }, - "VBOX_INTERFACE_NOT_FOUND": { - Regexp: re(`ERR_INTNET_FLT_IF_NOT_FOUND`), - Advice: "VirtualBox is unable to find its network interface. 
Try upgrading to the latest release and rebooting.", - Issues: []int{6036}, - }, -} - -// proxyDoc is the URL to proxy documentation -const proxyDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" -const vpnDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" - -// netProblems are network related problems. -var netProblems = map[string]match{ - "GCR_UNAVAILABLE": { - Regexp: re(`gcr.io.*443: connect: invalid argument`), - Advice: "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.", - URL: proxyDoc, - Issues: []int{3860}, - }, - "DOWNLOAD_RESET_BY_PEER": { - Regexp: re(`Error downloading .*connection reset by peer`), - Advice: "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.", - URL: proxyDoc, - Issues: []int{3909}, - }, - "DOWNLOAD_IO_TIMEOUT": { - Regexp: re(`Error downloading .*timeout`), - Advice: "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.", - URL: proxyDoc, - Issues: []int{3846}, - }, - "DOWNLOAD_TLS_OVERSIZED": { - Regexp: re(`tls: oversized record received with length`), - Advice: "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.", - URL: proxyDoc, - Issues: []int{3857, 3759, 4252}, - }, - "DOWNLOAD_BLOCKED": { - Regexp: re(`iso: failed to download|download.*host has failed to respond`), - Advice: "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.", - URL: proxyDoc, - Issues: []int{3922, 6109, 6123}, - }, - "PULL_TIMEOUT_EXCEEDED": { - Regexp: re(`ImagePull.*Timeout exceeded while awaiting headers`), - Advice: "A firewall is blocking Docker the minikube VM from reaching the image repository. 
You may need to select --image-repository, or use a proxy.", - URL: proxyDoc, - Issues: []int{3898, 6070}, - }, - "SSH_AUTH_FAILURE": { - Regexp: re(`ssh: handshake failed: ssh: unable to authenticate.*, no supported methods remain`), - Advice: "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.", - URL: vpnDoc, - Issues: []int{3930}, - }, - "SSH_TCP_FAILURE": { - Regexp: re(`dial tcp .*:22: connectex: A connection attempt failed because the connected party did not properly respond`), - Advice: "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.", - URL: vpnDoc, - Issues: []int{3388}, - }, - "INVALID_PROXY_HOSTNAME": { - Regexp: re(`dial tcp: lookup.*: no such host`), - Advice: "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.", - URL: proxyDoc, - }, - "HOST_CIDR_CONFLICT": { - Regexp: re(`host-only cidr conflicts with the network address of a host interface`), - Advice: "Specify an alternate --host-only-cidr value, such as 172.16.0.1/24", - Issues: []int{3594}, - }, - "HTTP_HTTPS_RESPONSE": { - Regexp: re(`http: server gave HTTP response to HTTPS client`), - Advice: "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy", - Issues: []int{6107}, - URL: proxyDoc, - }, - "NOT_A_TLS_HANDSHAKE": { - Regexp: re(`tls: first record does not look like a TLS handshake`), - Advice: "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy", - Issues: []int{7286}, - URL: proxyDoc, - }, -} - -// deployProblems are Kubernetes deployment problems. 
-var deployProblems = map[string]match{ - "DOCKER_UNAVAILABLE": { - Regexp: re(`Error configuring auth on host: OS type not recognized`), - Advice: "Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.", - Issues: []int{3952}, - }, - "INVALID_KUBERNETES_VERSION": { - Regexp: re(`No Major.Minor.Patch elements found`), - Advice: "Specify --kubernetes-version in v. form. example: 'v1.1.14'", - }, - "KUBERNETES_VERSION_MISSING_V": { - Regexp: re(`strconv.ParseUint: parsing "": invalid syntax`), - Advice: "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'", - }, - "APISERVER_MISSING": { - Regexp: re(`apiserver process never appeared`), - Advice: "Check that the provided apiserver flags are valid, and that SELinux is disabled", - Issues: []int{4536, 6014}, - }, - "APISERVER_TIMEOUT": { - Regexp: re(`apiserver: timed out waiting for the condition`), - Advice: "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/", - URL: vpnDoc, - Issues: []int{4302}, - }, - "DNS_TIMEOUT": { - Regexp: re(`dns: timed out waiting for the condition`), - Advice: "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict", - URL: vpnDoc, - }, - "SERVICE_NOT_FOUND": { - Regexp: re(`Could not find finalized endpoint being pointed to by`), - Advice: "Please make sure the service you are looking for is deployed or is in the correct namespace.", - Issues: []int{4599}, - }, - "OPEN_SERVICE_NOT_FOUND": { - Regexp: re(`Error opening service.*not found`), - Advice: "Use 'kubect get po -A' to find the correct and namespace name", - Issues: []int{5836}, - }, - "OOM_KILL_SSH": { - Regexp: re(`Process exited with status 137 from signal KILL`), - Advice: "Disable dynamic memory in your VM manager, or pass in a larger --memory value", - Issues: []int{1766}, - }, - "OOM_KILL_SCP": { - Regexp: re(`An existing connection 
was forcibly closed by the remote host`), - Advice: "Disable dynamic memory in your VM manager, or pass in a larger --memory value", - Issues: []int{1766}, - }, - "PROXY_UNEXPECTED_503": { - Regexp: re(`proxy.*unexpected response code: 503`), - Advice: "Confirm that you have a working internet connection and that your VM has not run out of resources by using: 'minikube logs'", - Issues: []int{4749}, - }, - "CERT_NOT_SIGNED_BY_CA": { - Regexp: re(`not signed by CA certificate ca: crypto/rsa: verification error`), - Advice: "Try 'minikube delete' to force new SSL certificates to be installed", - Issues: []int{6596}, - }, - "DOCKER_RESTART_FAILED": { - Regexp: re(`systemctl -f restart docker`), - Advice: "Remove the incompatible --docker-opt flag if one was provided", - Issues: []int{7070}, - }, - "WAITING_FOR_SSH": { - Regexp: re(`waiting for SSH to be available`), - Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", - Issues: []int{4617}, - }, -} - -// osProblems are operating-system specific issues -var osProblems = map[string]match{ - "NON_C_DRIVE": { - Regexp: re(`.iso: The system cannot find the path specified.`), - Advice: "Run minikube from the C: drive.", - Issues: []int{1574}, - }, - "SYSTEMCTL_EXIT_1": { - Regexp: re(`Failed to enable container runtime: .*sudo systemctl start docker: exit status 1`), - Advice: "If using the none driver, ensure that systemctl is installed", - URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/", - Issues: []int{2704}, - }, - "KUBECONFIG_WRITE_FAIL": { - Regexp: re(`Failed to setup kubeconfig: writing kubeconfig`), - Advice: "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path", - Issues: []int{5268, 4100, 5207}, - }, - "KUBECONFIG_DENIED": { - Regexp: re(`.kube/config: permission denied`), - Advice: "Run: 'chmod 600 $HOME/.kube/config'", - GOOS: []string{"darwin", "linux"}, - Issues: []int{5714}, - }, - 
"JUJU_LOCK_DENIED": { - Regexp: re(`unable to open /tmp/juju.*: permission denied`), - Advice: "Run 'sudo sysctl fs.protected_regular=0', or try a driver which does not require root, such as '--driver=docker'", - GOOS: []string{"linux"}, - Issues: []int{6391}, - }, -} - -// stateProblems are issues relating to local state -var stateProblems = map[string]match{ - "MACHINE_DOES_NOT_EXIST": { - Regexp: re(`machine does not exist`), - Advice: "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with", - Issues: []int{3864, 6087}, - }, - "MACHINE_NOT_FOUND": { - Regexp: re(`Machine does not exist for api.Exists`), - Advice: "Your minikube vm is not running, try minikube start.", - Issues: []int{4889}, - }, - "IP_NOT_FOUND": { - Regexp: re(`Error getting ssh host name for driver: IP not found`), - Advice: "The minikube VM is offline. Please run 'minikube start' to start it again.", - Issues: []int{3849, 3648}, - }, - "DASHBOARD_ROLE_REF": { - Regexp: re(`dashboard.*cannot change roleRef`), - Advice: "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'", - Issues: []int{7256}, - }, -} - -// dockerProblems are issues relating to issues with the docker driver -var dockerProblems = map[string]match{ - "NO_SPACE_ON_DEVICE": { - Regexp: re(`.*docker.*No space left on device.*`), - Advice: `Try at least one of the following to free up space on the device: - - 1. Run "docker system prune" to remove unused docker data - 2. Increase the amount of memory allocated to Docker for Desktop via - Docker icon > Preferences > Resources > Disk Image Size - 3. 
Run "minikube ssh -- docker system prune" if using the docker container runtime -`, - Issues: []int{9024}, - }, -} diff --git a/pkg/minikube/problem/problem.go b/pkg/minikube/problem/problem.go deleted file mode 100644 index e859eaf534..0000000000 --- a/pkg/minikube/problem/problem.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package problem helps deliver actionable feedback to a user based on an error message. -package problem - -import ( - "fmt" - "regexp" - - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/out/register" - "k8s.io/minikube/pkg/minikube/translate" -) - -const issueBase = "https://github.com/kubernetes/minikube/issues" - -// Problem represents a known problem in minikube. -type Problem struct { - // ID is an arbitrary unique and stable string describing this issue - ID string - // Err is the original error - Err error - // Advice is actionable text that the user should follow - Advice string - // URL is a reference URL for more information - URL string - // Issues are a list of related issues to this problem - Issues []int - // Hide the new issue link: it isn't our problem, and we won't be able to suggest additional assistance. - ShowIssueLink bool -} - -// match maps a regular expression to problem metadata. 
-type match struct { - Regexp *regexp.Regexp - Advice string - URL string - Issues []int - // GOOS is what platforms this problem may be specific to, when disambiguation is necessary. - GOOS []string - // Hide the new issue link: it isn't our problem, and we won't be able to suggest additional assistance. - ShowIssueLink bool -} - -// Display problem metadata to the console -func (p *Problem) Display() { - out.ErrT(out.Tip, "Suggestion: {{.advice}}", out.V{"advice": translate.T(p.Advice)}) - if p.URL != "" { - out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": p.URL}) - } - if len(p.Issues) == 0 { - return - } - - if len(p.Issues) == 1 { - out.ErrT(out.Issues, "Related issue: {{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, p.Issues[0])}) - return - } - - out.ErrT(out.Issues, "Related issues:") - issues := p.Issues - if len(issues) > 3 { - issues = issues[0:3] - } - for _, i := range issues { - out.ErrT(out.Issue, "{{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, i)}) - } -} - -// DisplayJSON displays problem metadata in JSON format -func (p *Problem) DisplayJSON(exitcode int) { - var issues string - for _, i := range p.Issues { - issues += fmt.Sprintf("https://github.com/kubernetes/minikube/issues/%v,", i) - } - extraArgs := map[string]string{ - "name": p.ID, - "advice": p.Advice, - "url": p.URL, - "issues": issues, - } - register.PrintErrorExitCode(p.Err.Error(), exitcode, extraArgs) -} - -// FromError returns a known problem from an error on an OS -func FromError(err error, goos string) *Problem { - maps := []map[string]match{ - osProblems, - vmProblems, - netProblems, - deployProblems, - stateProblems, - dockerProblems, - } - - var osMatch *Problem - var genericMatch *Problem - - for _, m := range maps { - for id, match := range m { - if !match.Regexp.MatchString(err.Error()) { - continue - } - - // Does this match require an OS matchup? 
- if len(match.GOOS) > 0 { - foundOS := false - for _, o := range match.GOOS { - if o == goos { - foundOS = true - } - } - if !foundOS { - continue - } - } - - p := &Problem{ - Err: err, - Advice: match.Advice, - URL: match.URL, - ID: id, - Issues: match.Issues, - ShowIssueLink: match.ShowIssueLink, - } - - if len(match.GOOS) > 0 { - osMatch = p - } else { - genericMatch = p - } - } - } - - // Prioritize operating-system specific matches over general ones - if osMatch != nil { - return osMatch - } - return genericMatch -} diff --git a/pkg/minikube/problem/problem_test.go b/pkg/minikube/problem/problem_test.go deleted file mode 100644 index aa2ce233c9..0000000000 --- a/pkg/minikube/problem/problem_test.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package problem - -import ( - "bytes" - "fmt" - "os" - "strings" - "testing" - - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/out/register" -) - -type buffFd struct { - bytes.Buffer - uptr uintptr -} - -func (b buffFd) Fd() uintptr { return b.uptr } - -func TestDisplay(t *testing.T) { - buffErr := buffFd{} - out.SetErrFile(&buffErr) - var tests = []struct { - description string - problem Problem - expected string - }{ - { - problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test")}, - description: "url, id and err", - expected: ` -* Suggestion: -* Documentation: example.com -`, - }, - { - problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}, Advice: "you need a hug"}, - description: "with 2 issues and suggestion", - expected: ` -* Suggestion: you need a hug -* Documentation: example.com -* Related issues: - - https://github.com/kubernetes/minikube/issues/0 - - https://github.com/kubernetes/minikube/issues/1 -`, - }, - { - problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}}, - description: "with 2 issues", - expected: ` -* Suggestion: -* Documentation: example.com -* Related issues: - - https://github.com/kubernetes/minikube/issues/0 - - https://github.com/kubernetes/minikube/issues/1 -`, - }, - // 6 issues should be trimmed to 3 - { - problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1, 2, 3, 4, 5}}, - description: "with 6 issues", - expected: ` -* Suggestion: -* Documentation: example.com -* Related issues: - - https://github.com/kubernetes/minikube/issues/0 - - https://github.com/kubernetes/minikube/issues/1 - - https://github.com/kubernetes/minikube/issues/2 -`, - }, - } - for _, tc := range tests { - t.Run(tc.description, func(t *testing.T) { - buffErr.Truncate(0) - tc.problem.Display() - errStr := buffErr.String() - if strings.TrimSpace(errStr) != strings.TrimSpace(tc.expected) { - 
t.Fatalf("Expected errString:\n%v\ngot:\n%v\n", tc.expected, errStr) - } - }) - } -} - -func TestDisplayJSON(t *testing.T) { - defer out.SetJSON(false) - out.SetJSON(true) - - tcs := []struct { - p *Problem - expected string - }{ - { - p: &Problem{ - Err: fmt.Errorf("my error"), - Advice: "fix me!", - Issues: []int{1, 2}, - URL: "url", - ID: "BUG", - }, - expected: `{"data":{"advice":"fix me!","exitcode":"4","issues":"https://github.com/kubernetes/minikube/issues/1,https://github.com/kubernetes/minikube/issues/2,","message":"my error","name":"BUG","url":"url"},"datacontenttype":"application/json","id":"random-id","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.error"} -`, - }, - } - for _, tc := range tcs { - t.Run(tc.p.ID, func(t *testing.T) { - buf := bytes.NewBuffer([]byte{}) - register.SetOutputFile(buf) - defer func() { register.SetOutputFile(os.Stdout) }() - - register.GetUUID = func() string { - return "random-id" - } - - tc.p.DisplayJSON(4) - actual := buf.String() - if actual != tc.expected { - t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", tc.expected, actual) - } - }) - } -} - -func TestFromError(t *testing.T) { - var tests = []struct { - issue int - os string - want string - err string - }{ - {0, "", "", "this is just a lame error message with no matches."}, - {2991, "linux", "KVM_UNAVAILABLE", "Unable to start VM: create: Error creating machine: Error in driver during machine creation: creating domain: Error defining domain xml:\n\n: virError(Code=8, Domain=44, Message='invalid argument: could not find capabilities for domaintype=kvm ')"}, - {3594, "", "HOST_CIDR_CONFLICT", "Error starting host: Error starting stopped host: Error setting up host only network on machine start: host-only cidr conflicts with the network address of a host interface."}, - {3614, "", "VBOX_HOST_ADAPTER", "Error starting host: Error starting stopped host: Error setting up host only network on machine start: 
The host-only adapter we just created is not visible. This is a well known VirtualBox bug. You might want to uninstall it and reinstall at least version 5.0.12 that is supposed to fix this issue"}, - {3784, "", "VBOX_NOT_FOUND", "create: precreate: VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path"}, - {3849, "", "IP_NOT_FOUND", "bootstrapper: Error creating new ssh host from driver: Error getting ssh host name for driver: IP not found"}, - {3859, "windows", "VBOX_HARDENING", `Unable to start VM: create: creating: Unable to start the VM: C:\Program Files\Oracle\VirtualBox\VBoxManage.exe startvm minikube --type headless failed: -VBoxManage.exe: error: The virtual machine 'minikube' has terminated unexpectedly during startup with exit code -1073741819 (0xc0000005). More details may be available in 'C:\Users\pabitra_b.minikube\machines\minikube\minikube\Logs\VBoxHardening.log' -VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component MachineWrap, interface IMachine`}, - {3922, "", "DOWNLOAD_BLOCKED", `unable to cache ISO: https://storage.googleapis.com/minikube/iso/minikube-v0.35.0.iso: failed to download: failed to download to temp file: download failed: 5 error(s) occurred: -* Temporary download error: Get https://storage.googleapis.com/minikube/iso/minikube-v0.35.0.iso: dial tcp 216.58.207.144:443: connectex: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.`}, - {4107, "darwin", "VBOX_BLOCKED", "Result Code: NS_ERROR_FAILURE (0x80004005)"}, - {4302, "", "APISERVER_TIMEOUT", "apiserver: timed out waiting for the condition"}, - {4252, "", "DOWNLOAD_TLS_OVERSIZED", "Failed to update cluster: downloading binaries: downloading kubeadm: Error downloading kubeadm v1.14.1: failed to download: failed to download to temp file: download failed: 5 error(s) occurred:\n\nTemporary download error: Get 
https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubeadm: proxyconnect tcp: tls: oversized record received with length 20527"}, - {4222, "", "VBOX_HOST_ADAPTER", "Unable to start VM: create: creating: Error setting up host only network on machine start: The host-only adapter we just created is not visible. This is a well known VirtualBox bug. You might want to uninstall it and reinstall at least version 5.0.12 that is supposed to fix this issue"}, - {6014, "linux", "NONE_APISERVER_MISSING", "Error restarting cluster: waiting for apiserver: apiserver process never appeared"}, - {5836, "", "OPEN_SERVICE_NOT_FOUND", `Error opening service: Service newservice was not found in "unknown" namespace. You may select another namespace by using 'minikube service newservice -n : Temporary Error: Error getting service newservice: services "newservice" not found`}, - {6087, "", "MACHINE_DOES_NOT_EXIST", `Error getting machine status: state: machine does not exist`}, - {5714, "darwin", "KUBECONFIG_DENIED", `Failed to setup kubeconfig: writing kubeconfig: Error writing file /Users/matthewgleich/.kube/config: error writing file /Users/matthewgleich/.kube/config: open /Users/matthewgleich/.kube/config: permission denied`}, - {5532, "linux", "NONE_DOCKER_EXIT_5", `Failed to enable container runtime: running command: sudo systemctl start docker: exit status 5`}, - {5532, "linux", "NONE_CRIO_EXIT_5", `Failed to enable container runtime: running command: sudo systemctl restart crio: exit status 5`}, - {5484, "linux", "NONE_PORT_IN_USE", `[ERROR Port-10252]: Port 10252 is in use`}, - {4913, "linux", "KVM_CREATE_CONFLICT", `Unable to start VM: create: Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: process exited while connecting to monitor: ioctl(KVM_CREATE_VM) failed: 16 Device or resource busy`}, - {5950, "linux", "KVM_ISO_PERMISSION", `Retriable failure: create: 
Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: qemu unexpectedly closed the monitor: 2019-11-19T16:08:16.757609Z qemu-kvm: -drive file=/home/lnicotra/.minikube/machines/minikube/boot2docker.iso,format=raw,if=none,id=drive-scsi0-0-0-2,readonly=on: could not open disk image /home/lnicotra/.minikube/machines/minikube/boot2docker.iso: Could not open '/home/lnicotra/.minikube/machines/minikube/boot2docker.iso': Permission denied'`}, - {5836, "", "OPEN_SERVICE_NOT_FOUND", `Error opening service: Service kubernetes-bootcamp was not found in "default" namespace. You may select another namespace by using 'minikube service kubernetes-bootcamp -n : Temporary Error: Error getting service kubernetes-bootcamp: services "kubernetes-bootcamp" not found`}, - {3898, "", "PULL_TIMEOUT_EXCEEDED", `[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.17.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`}, - {6079, "darwin", "HYPERKIT_CRASHED", `Error creating machine: Error in driver during machine creation: hyperkit crashed! 
command line:`}, - {5636, "linux", "NONE_DEFAULT_ROUTE", `Unable to get VM IP address: unable to select an IP from default routes.`}, - {6087, "", "MACHINE_DOES_NOT_EXIST", `Error getting host status: state: machine does not exist`}, - {6098, "windows", "PRECREATE_EXIT_1", `Retriable failure: create: precreate: exit status 1`}, - {6107, "", "HTTP_HTTPS_RESPONSE", `http: server gave HTTP response to HTTPS client`}, - {6109, "", "DOWNLOAD_BLOCKED", `Failed to update cluster: downloading binaries: downloading kubelet: Error downloading kubelet v1.16.2: failed to download: failed to download to temp file: failed to copy contents: read tcp 192.168.0.106:61314->172.217.166.176:443: wsarecv: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.`}, - {6109, "", "DOWNLOAD_BLOCKED", `Failed to update cluster: downloading binaries: downloading kubeadm: Error downloading kubeadm v1.17.0: failed to download: failed to download to temp file: failed to copy contents: read tcp [2606:a000:81c5:1e00:349a:26c0:7ea6:bbf1]:55317->[2607:f8b0:4004:815::2010]:443: wsarecv: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.`}, - {4277, "linux", "KVM2_FAILED_MSR", `Unable to start VM: start: Error creating VM: virError(Code=1, Domain=10, Message='internal error: qemu unexpectedly closed the monitor: 2019-05-17T02:20:07.980140Z qemu-system-x86_64: error: failed to set MSR 0x38d to 0x0 qemu-system-x86_64: /build/qemu-lXHhGe/qemu-2.11+dfsg/target/i386/kvm.c:1807: kvm_put_msrs: Assertion ret == cpu->kvm_msr_buf->nmsrs failed.`}, - } - for _, tc := range tests { - t.Run(tc.want, func(t *testing.T) { - got := FromError(fmt.Errorf(tc.err), tc.os) - if got == nil { - if tc.want != "" { - t.Errorf("FromError(%q)=nil, want %s", tc.err, 
tc.want) - } - return - } - if got.ID != tc.want { - t.Errorf("FromError(%q)=%s, want %s", tc.err, got.ID, tc.want) - } - - found := false - for _, i := range got.Issues { - if i == tc.issue { - found = true - } - } - if !found { - t.Errorf("Issue %d is not listed in %+v", tc.issue, got.Issues) - } - }) - } -} diff --git a/pkg/minikube/reason/exitcodes.go b/pkg/minikube/reason/exitcodes.go new file mode 100644 index 0000000000..1cfd94c0b3 --- /dev/null +++ b/pkg/minikube/reason/exitcodes.go @@ -0,0 +1,162 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright 2019 TheExSvc Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reason + +const ( + // Reserved UNIX exit codes + ExFailure = 1 // Failure represents a general failure code + ExInterrupted = 2 // Ctrl-C (SIGINT) + + // 3-7 are reserved for crazy legacy codes returned by "minikube status" + + // How to assign new minikube exit codes: + // + // * Each error source is indexed from 10 onward, in general, it follows the dependency stack + // * For each error source, we roughly try to follow sysexits(3) for backwards compatibility + // + // errorOff = 0 // (~EX_SOFTWARE) + // conflictOff = 1 // (~EX_OSERR) + // timeoutOff = 2 // (~EX_INTERRUPTED) + // notRunningOff = 3 // custom + // usageOff = 4 // (~EX_USAGE) + // notFoundOff = 5 // (~EX_DATAERR) + // unsupportedOff = 6 // (~EX_PROTOCOL) + // permissionOff = 7 // (~EX_NOPERM) + // configOff = 8 // (~EX_CONFIG) + // navailableOff = 9 // (~EX_UNAVAILABLE) + + // Error codes specific to the minikube program + ExProgramError = 10 // generic error + ExProgramUsage = 14 // bad command-line options + ExProgramConflict = 11 // can't do what you want because of existing data + ExProgramNotFound = 15 // something was not found + ExProgramUnsupported = 16 // unsupported features + ExProgramConfig = 18 // bad configuration specified + + // Error codes specific to resource limits (exit code layout follows no rules) + ExResourceError = 20 + ExInsufficientMemory = 23 + ExInsufficientStorage = 26 + ExInsufficientPermission = 27 + ExInsufficientCores = 29 + + // Error codes specific to the host + ExHostError = 30 + ExHostConflict = 31 + ExHostTimeout = 32 + ExHostUsage = 34 + ExHostNotFound = 35 + ExHostUnsupported = 38 + ExHostPermission = 37 + ExHostConfig = 38 + + // Error codes specific to remote networking + ExInternetError = 40 + ExInternetConflict = 41 + ExInternetTimeout = 42 + ExInternetNotFound = 45 + ExInternetConfig = 48 + ExInternetUnavailable = 49 + + // Error codes specific to the libmachine driver + ExDriverError = 50 + ExDriverConflict = 51 + ExDriverTimeout = 52 + 
ExDriverUsage = 54 + ExDriverNotFound = 55 + ExDriverUnsupported = 56 + ExDriverPermission = 57 + ExDriverConfig = 58 + ExDriverUnavailable = 59 + + // Error codes specific to the driver provider + ExProviderError = 60 + ExProviderConflict = 61 + ExProviderTimeout = 62 + ExProviderNotRunning = 63 + // Reserve 64 for the moment as it used to be usage + ExProviderNotFound = 65 + ExProviderUnsupported = 66 + ExProviderPermission = 67 + ExProviderConfig = 68 + ExProviderUnavailable = 69 // In common use + + // Error codes specific to local networking + ExLocalNetworkError = 70 + ExLocalNetworkConflict = 71 + ExLocalNetworkTimeout = 72 + ExLocalNetworkNotFound = 75 + ExLocalNetworkPermission = 77 + ExLocalNetworkConfig = 78 + ExLocalNetworkUnavailable = 79 + + // Error codes specific to the guest host + ExGuestError = 80 + ExGuestConflict = 81 + ExGuestTimeout = 82 + ExGuestNotRunning = 83 + ExGuestNotFound = 85 + ExGuestUnsupported = 86 + ExGuestPermission = 87 + ExGuestConfig = 88 + ExGuestUnavailable = 89 + + // Error codes specific to the container runtime + ExRuntimeError = 90 + ExRuntimeNotRunning = 93 + ExRuntimeNotFound = 95 + ExRuntimeUnavailable = 99 + + // Error codes specific to the Kubernetes control plane + ExControlPlaneError = 100 + ExControlPlaneConflict = 101 + ExControlPlaneTimeout = 102 + ExControlPlaneNotRunning = 103 + ExControlPlaneNotFound = 105 + ExControlPlaneUnsupported = 106 + ExControlPlaneConfig = 108 + ExControlPlaneUnavailable = 109 + + // Error codes specific to a Kubernetes service + ExSvcError = 110 + ExSvcConflict = 111 + ExSvcTimeout = 112 + ExSvcNotRunning = 113 + ExSvcNotFound = 115 + ExSvcUnsupported = 116 + ExSvcPermission = 117 + ExSvcConfig = 118 + ExSvcUnavailable = 119 + + // Reserve 128+ for OS signal based exit codes +) diff --git a/pkg/minikube/reason/known_issues.go b/pkg/minikube/reason/known_issues.go new file mode 100644 index 0000000000..d83f6cdcf2 --- /dev/null +++ b/pkg/minikube/reason/known_issues.go @@ -0,0 +1,996 
@@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reason + +import ( + "regexp" + + "k8s.io/minikube/pkg/minikube/style" +) + +// links used by multiple known issues +const ( + proxyDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" + vpnDoc = "https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/" +) + +// re is a shortcut around regexp.MustCompile +func re(s string) *regexp.Regexp { + return regexp.MustCompile(s) +} + +// programIssues are issues with the minikube binary +var programIssues = []match{ + { + Kind: Kind{ + ID: "MK_KVERSION_USAGE", + ExitCode: ExProgramUsage, + Advice: "Specify --kubernetes-version in v<major>.<minor>.<patch> form. 
example: 'v1.1.14'", + }, + + Regexp: re(`No Major.Minor.Patch elements found`), + }, +} + +// resourceIssues are failures due to resource constraints +var resourceIssues = []match{ + { + Kind: Kind{ + ID: "RSRC_KVM_OOM", + ExitCode: ExInsufficientMemory, + Advice: "Choose a smaller value for --memory, such as 2000", + Issues: []int{6366}, + }, + Regexp: re(`cannot set up guest memory.*Cannot allocate memory`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "RSRC_SSH_OOM", + ExitCode: ExInsufficientMemory, + Advice: "Disable dynamic memory in your VM manager, or pass in a larger --memory value", + Issues: []int{1766}, + }, + Regexp: re(`Process exited with status 137 from signal KILL`), + }, + { + Kind: Kind{ + ID: "RSRC_SCP_OOM", + ExitCode: ExInsufficientMemory, + Advice: "Disable dynamic memory in your VM manager, or pass in a larger --memory value", + Issues: []int{1766}, + }, + Regexp: re(`An existing connection was forcibly closed by the remote host`), + }, + { + // Fallback to deliver a good error message even if internal checks are not run + Kind: Kind{ + ID: "RSRC_INSUFFICIENT_CORES", + ExitCode: ExInsufficientCores, + Advice: "Kubernetes requires at least 2 CPU's to start", + Issues: []int{7905}, + URL: "https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/", + }, + Regexp: re(`ERROR.*the number of available CPUs 1 is less than the required 2`), + }, +} + +// hostIssues are related to the host operating system or BIOS +var hostIssues = []match{ + { + Kind: Kind{ + ID: "HOST_VIRT_UNAVAILABLE", + ExitCode: ExHostConfig, + Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. 
Otherwise, consult your systems BIOS manual for how to enable virtualization.", + Issues: []int{3900, 4730}, + }, + Regexp: re(`This computer doesn't have VT-X/AMD-v enabled`), + }, + { + Kind: Kind{ + ID: "HOST_VTX_DISABLED", + ExitCode: ExHostConfig, + Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.", + Issues: []int{5282, 5456}, + }, + Regexp: re(`VT-x is disabled.*VERR_VMX_MSR_ALL_VMX_DISABLED`), + }, + { + Kind: Kind{ + ID: "HOST_VTX_UNAVAILABLE", + ExitCode: ExHostConfig, + Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS", + Issues: []int{1994, 5326}, + }, + Regexp: re(`VT-x is not available.*VERR_VMX_NO_VMX`), + }, + { + Kind: Kind{ + ID: "HOST_SVM_DISABLED", + ExitCode: ExHostConfig, + Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. 
Otherwise, enable virtualization in your BIOS", + Issues: []int{7074}, + }, + Regexp: re(`VERR_SVM_DISABLED`), + }, + { + Kind: Kind{ + ID: "HOST_NON_C_DRIVE", + ExitCode: ExHostUsage, + Advice: "Run minikube from the C: drive.", + Issues: []int{1574}, + }, + Regexp: re(`.iso: The system cannot find the path specified.`), + }, + { + Kind: Kind{ + ID: "HOST_KUBECONFIG_WRITE", + ExitCode: ExHostPermission, + Advice: "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path", + Issues: []int{5268, 4100, 5207}, + }, + Regexp: re(`Failed to setup kubeconfig: writing kubeconfig`), + }, + { + Kind: Kind{ + ID: "HOST_KUBECONFIG_PERMISSION", + ExitCode: ExHostPermission, + Advice: "Run: 'sudo chown $USER $HOME/.kube/config && chmod 600 $HOME/.kube/config'", + Issues: []int{5714}, + Style: style.NotAllowed, + }, + Regexp: re(`.kube/config: permission denied`), + GOOS: []string{"darwin", "linux"}, + }, + { + Kind: Kind{ + ID: "HOST_JUJU_LOCK_PERMISSION", + ExitCode: ExHostPermission, + Advice: "Run 'sudo sysctl fs.protected_regular=0', or try a driver which does not require root, such as '--driver=docker'", + Issues: []int{6391}, + }, + Regexp: re(`unable to open /tmp/juju.*: permission denied`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "HOST_DOCKER_CHROMEOS", + ExitCode: ExHostUnsupported, + Advice: "ChromeOS is missing the kernel support necessary for running Kubernetes", + Issues: []int{6411}, + }, + Regexp: re(`Container.*is not running.*chown docker:docker`), + }, +} + +// providerIssues are failures relating to a driver provider +var providerIssues = []match{ + // General + { + Kind: Kind{ + ID: "PR_PRECREATE_EXIT_1", + ExitCode: ExProviderError, + Advice: "The hypervisor does not appear to be configured properly. 
Run 'minikube start --alsologtostderr -v=1' and inspect the error code", + Issues: []int{6098}, + }, + Regexp: re(`precreate: exit status 1`), + }, + + // Docker environment + { + Kind: Kind{ + ID: "PR_DOCKER_CGROUP_MOUNT", + ExitCode: ExProviderError, + Advice: "Run: 'sudo mkdir /sys/fs/cgroup/systemd && sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'", + URL: "https://github.com/microsoft/WSL/issues/4189", + Issues: []int{5392}, + }, + Regexp: re(`cannot find cgroup mount destination: unknown`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_DOCKER_READONLY_VOL", + ExitCode: ExProviderError, + Advice: "Restart Docker", + Issues: []int{6825}, + }, + Regexp: re(`mkdir /var/lib/docker/volumes.*: read-only file system`), + }, + { + Kind: Kind{ + ID: "PR_DOCKER_NO_SSH", + ExitCode: ExProviderTimeout, + Advice: "Restart Docker, Ensure docker is running and then run: 'minikube delete' and then 'minikube start' again", + URL: "https://github.com/kubernetes/minikube/issues/8163#issuecomment-652627436", + Issues: []int{8163}, + }, + Regexp: re(`executing "" at `), + }, + + // Hyperkit hypervisor + { + Kind: Kind{ + ID: "PR_HYPERKIT_NO_IP", + ExitCode: ExProviderError, + Advice: "Install the latest hyperkit binary, and run 'minikube delete'", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/", + Issues: []int{1926, 4206}, + }, + Regexp: re(`IP address never found in dhcp leases file Temporary Error: Could not find an IP address for`), + GOOS: []string{"darwin"}, + }, + { + Kind: Kind{ + ID: "PR_HYPERKIT_NOT_FOUND", + ExitCode: ExProviderNotFound, + Advice: "Please install the minikube hyperkit VM driver, or select an alternative --driver", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/", + }, + Regexp: re(`Driver "hyperkit" not found.`), + GOOS: []string{"darwin"}, + }, + { + Kind: Kind{ + ID: "PR_HYPERKIT_VMNET_FRAMEWORK", + ExitCode: ExProviderError, + Advice: "Hyperkit networking is broken. 
Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver", + Issues: []int{6028, 5594}, + }, + Regexp: re(`error from vmnet.framework: -1`), + GOOS: []string{"darwin"}, + }, + { + Kind: Kind{ + ID: "PR_HYPERKIT_CRASHED", + ExitCode: ExProviderError, + Advice: "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver", + Issues: []int{6079, 5780}, + }, + Regexp: re(`hyperkit crashed!`), + GOOS: []string{"darwin"}, + }, + + // Hyper-V hypervisor + { + Kind: Kind{ + ID: "PR_HYPERV_AS_ADMIN", + ExitCode: ExProviderPermission, + Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", + URL: "https://rominirani.com/docker-machine-windows-10-hyper-v-troubleshooting-tips-367c1ea73c24", + Issues: []int{4511}, + }, + Regexp: re(`Hyper-v commands have to be run as an Administrator`), + GOOS: []string{"windows"}, + }, + { + Kind: Kind{ + ID: "PR_HYPERV_NEEDS_ESC", + ExitCode: ExProviderPermission, + Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", + Issues: []int{7347}, + }, + Regexp: re(`The requested operation requires elevation.`), + GOOS: []string{"windows"}, + }, + + // KVM hypervisor + { + Kind: Kind{ + ID: "PR_KVM_CAPABILITIES", + ExitCode: ExProviderUnavailable, + Advice: "Your host does not support KVM virtualization. 
Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem", + URL: "http://mikko.repolainen.fi/documents/virtualization-with-kvm", + Issues: []int{2991}, + }, + Regexp: re(`invalid argument: could not find capabilities for domaintype=kvm`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_KVM_SOCKET", + ExitCode: ExProviderUnavailable, + Advice: "Check that libvirt is setup properly", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", + }, + Regexp: re(`error connecting to libvirt socket`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_KVM_ISO_PERMISSION", + ExitCode: ExProviderPermission, + Advice: "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory", + Issues: []int{5950}, + }, + Regexp: re(`boot2docker.iso.*Permission denied`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_KVM_NET_XML", + ExitCode: ExProviderConfig, + Advice: "Rebuild libvirt with virt-network support", + URL: "https://forums.gentoo.org/viewtopic-t-981692-start-0.html", + Issues: []int{4195}, + }, + Regexp: re(`not supported by the connection driver: virNetworkDefineXML`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_KVM_MSR", + ExitCode: ExProviderError, + Advice: "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.", + Issues: []int{4277}, + }, + Regexp: re(`qemu unexpectedly closed the monitor.*failed to set MSR`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_KVM_CREATE_BUSY", + ExitCode: ExDriverConflict, + Advice: "Another hypervisor, such as VirtualBox, is conflicting with KVM. 
Please stop the other hypervisor, or use --driver to switch to it.", + Issues: []int{4913}, + }, + Regexp: re(`KVM_CREATE_VM.* failed:.* Device or resource busy`), + GOOS: []string{"linux"}, + }, + + // VirtualBox provider + { + Kind: Kind{ + ID: "PR_VBOX_BLOCKED", + ExitCode: ExProviderPermission, + Advice: "Reinstall VirtualBox and verify that it is not blocked: System Preferences -> Security & Privacy -> General -> Some system software was blocked from loading", + Issues: []int{4107}, + }, + Regexp: re(`NS_ERROR.*0x80004005`), + GOOS: []string{"darwin"}, + }, + { + Kind: Kind{ + ID: "PR_VBOX_MODULE", + ExitCode: ExProviderNotRunning, + Advice: "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", + Issues: []int{4043, 4711}, + }, + Regexp: re(`vboxdrv kernel module is not loaded`), + }, + { + Kind: Kind{ + ID: "PR_VBOX_DEVICE_MISSING", + ExitCode: ExProviderNotRunning, + Advice: "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", + Issues: []int{3974}, + }, + Regexp: re(`vboxdrv does not exist`), + }, + { + Kind: Kind{ + ID: "PR_VBOX_HARDENING", + ExitCode: ExProviderConflict, + Advice: "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.", + Issues: []int{3859, 3910}, + URL: "https://forums.virtualbox.org/viewtopic.php?f=25&t=82106", + }, + Regexp: re(`terminated unexpectedly.*VBoxHardening`), + GOOS: []string{"windows"}, + }, + { + Kind: Kind{ + ID: "PR_VBOX_80004005", + ExitCode: ExProviderError, + Advice: "VirtualBox is broken. 
Reinstall VirtualBox, reboot, and run 'minikube delete'.", + Issues: []int{5227}, + }, + Regexp: re(`terminated unexpectedly.*NS_ERROR.*0x80004005`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "PR_VBOX_HYPERV_64_BOOT", + ExitCode: ExProviderConflict, + Advice: "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'", + Issues: []int{4051, 4783}, + }, + Regexp: re(`VirtualBox won't boot a 64bits VM when Hyper-V is activated`), + }, + { + Kind: Kind{ + ID: "PR_VBOX_HYPERV_CONFLICT", + ExitCode: ExProviderConflict, + Advice: "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'", + Issues: []int{4587}, + }, + Regexp: re(`vrc=VERR_NEM_VM_CREATE`), + }, + { + Kind: Kind{ + ID: "PR_VBOXMANAGE_NOT_FOUND", + ExitCode: ExProviderNotFound, + Advice: "Install VirtualBox and ensure it is in the path, or select an alternative value for --driver", + URL: "https://minikube.sigs.k8s.io/docs/start/", + Issues: []int{3784}, + }, + Regexp: re(`VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path`), + }, +} + +// driverIssues are specific to a libmachine driver +var driverIssues = []match{ + // Generic VM driver + { + Kind: Kind{ + ID: "DRV_CORRUPT", + ExitCode: ExDriverError, + Advice: "The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/", + NewIssueLink: true, + }, + Regexp: re(`Error attempting to get plugin server address for RPC`), + }, + { + Kind: Kind{ + ID: "DRV_EXITED_1", + ExitCode: ExDriverError, + Advice: "The VM driver crashed. 
Run 'minikube start --alsologtostderr -v=8' to see the VM driver error message", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/#troubleshooting", + NewIssueLink: true, + }, + Regexp: re(`Unable to start VM: start: exit status 1`), + }, + { + Kind: Kind{ + ID: "DRV_REGISTRY_NOT_FOUND", + ExitCode: ExDriverUnsupported, + Advice: "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.", + Issues: []int{5295}, + }, + Regexp: re(`registry: driver not found`), + }, + { + Kind: Kind{ + ID: "DRV_MISSING_ADDRESS", + ExitCode: ExDriverError, + Advice: "The machine-driver specified is failing to start. Try running 'docker-machine-driver-<type> version'", + Issues: []int{6023, 4679}, + NewIssueLink: true, + }, + Regexp: re(`new host: dial tcp: missing address`), + }, + { + Kind: Kind{ + ID: "DRV_CREATE_TIMEOUT", + ExitCode: ExDriverTimeout, + Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", + Issues: []int{7072}, + }, + Regexp: re(`create host timed out in \d`), + }, + { + Kind: Kind{ + ID: "DRV_IMAGE_ARCH_UNSUPPORTED", + ExitCode: ExDriverUnsupported, + Advice: "This driver does not yet work on your architecture. Maybe try --driver=none", + Issues: []int{7071}, + }, + Regexp: re(`Error: incompatible image architecture`), + GOOS: []string{"linux"}, + }, + + // Hyper-V + { + Kind: Kind{ + ID: "DRV_HYPERV_NO_VSWITCH", + ExitCode: ExDriverConfig, + Advice: "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=<switch-name>` to `minikube start`", + URL: "https://docs.docker.com/machine/drivers/hyper-v/", + }, + Regexp: re(`no External vswitch found. 
A valid vswitch must be available for this command to run.`), + GOOS: []string{"windows"}, + }, + { + Kind: Kind{ + ID: "DRV_HYPERV_VSWITCH_NOT_FOUND", + ExitCode: ExDriverUsage, + Advice: "Confirm that you have supplied the correct value to --hyperv-virtual-switch using the 'Get-VMSwitch' command", + URL: "https://docs.docker.com/machine/drivers/hyper-v/", + }, + Regexp: re(`precreate: vswitch.*not found`), + GOOS: []string{"windows"}, + }, + { + Kind: Kind{ + ID: "DRV_HYPERV_POWERSHELL_NOT_FOUND", + ExitCode: ExDriverUnavailable, + Advice: "To start minikube with Hyper-V, Powershell must be in your PATH`", + URL: "https://docs.docker.com/machine/drivers/hyper-v/", + }, + Regexp: re(`Powershell was not found in the path`), + GOOS: []string{"windows"}, + }, + + { + Kind: Kind{ + ID: "DRV_HYPERV_FILE_DELETE", + ExitCode: ExDriverConflict, + Advice: "You may need to stop the Hyper-V Manager and run `minikube delete` again.", + Issues: []int{6804}, + }, + Regexp: re(`Unable to remove machine directory`), + GOOS: []string{"windows"}, + }, + + // KVM + { + Kind: Kind{ + ID: "DRV_KVM2_NOT_FOUND", + ExitCode: ExDriverNotFound, + Advice: "Please install the minikube kvm2 VM driver, or select an alternative --driver", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", + }, + Regexp: re(`Driver "kvm2" not found. Do you have the plugin binary .* accessible in your PATH`), + GOOS: []string{"linux"}, + }, + + { + Kind: Kind{ + ID: "DRV_RESTART_NO_IP", + ExitCode: ExDriverTimeout, + Advice: "The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.", + Issues: []int{3901, 3434}, + }, + Regexp: re(`Error starting stopped host: Machine didn't return an IP after \d+ seconds`), + }, + { + Kind: Kind{ + ID: "DRV_NO_IP", + ExitCode: ExDriverTimeout, + Advice: "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. 
If you are running minikube within a VM, consider using --driver=none", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", + Issues: []int{4249, 3566}, + }, + Regexp: re(`Error in driver during machine creation: Machine didn't return an IP after \d+ seconds`), + GOOS: []string{"linux"}, + }, +} + +// localNetworkIssues are errors communicating to the guest +var localNetworkIssues = []match{ + { + Kind: Kind{ + ID: "IF_SSH_AUTH", + ExitCode: ExLocalNetworkConfig, + Advice: "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.", + URL: vpnDoc, + Issues: []int{3930}, + }, + Regexp: re(`ssh: handshake failed: ssh: unable to authenticate.*, no supported methods remain`), + }, + { + Kind: Kind{ + ID: "IF_SSH_NO_RESPONSE", + ExitCode: ExLocalNetworkConfig, + Advice: "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. 
If not, check your VM environment routing options.", + URL: vpnDoc, + Issues: []int{3388}, + }, + Regexp: re(`dial tcp .*:22: connectex: A connection attempt failed because the connected party did not properly respond`), + }, + { + Kind: Kind{ + ID: "IF_HOST_CIDR_CONFLICT", + ExitCode: ExLocalNetworkConflict, + Advice: "Specify an alternate --host-only-cidr value, such as 172.16.0.1/24", + Issues: []int{3594}, + }, + Regexp: re(`host-only cidr conflicts with the network address of a host interface`), + }, + { + Kind: Kind{ + ID: "IF_VBOX_NOT_VISIBLE", + ExitCode: ExLocalNetworkNotFound, + Advice: "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor", + Issues: []int{3614, 4222, 5817}, + URL: "https://stackoverflow.com/questions/52277019/how-to-fix-vm-issue-with-minikube-start", + }, + Regexp: re(`The host-only adapter we just created is not visible`), + }, + { + Kind: Kind{ + ID: "IF_VBOX_SAME_IP", + ExitCode: ExLocalNetworkConflict, + Advice: "Use VirtualBox to remove the conflicting VM and/or network interfaces", + URL: "https://stackoverflow.com/questions/55573426/virtualbox-is-configured-with-multiple-host-only-adapters-with-the-same-ip-whe", + Issues: []int{3584}, + }, + Regexp: re(`VirtualBox is configured with multiple host-only adapters with the same IP`), + }, + { + Kind: Kind{ + ID: "IF_VBOX_NOT_FOUND", + ExitCode: ExLocalNetworkNotFound, + Advice: "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.", + Issues: []int{6036}, + }, + Regexp: re(`ERR_INTNET_FLT_IF_NOT_FOUND`), + }, + { + Kind: Kind{ + ID: "IF_VBOX_UNSPECIFIED", + ExitCode: ExLocalNetworkConflict, + Advice: "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. 
Try running 'minikube delete'", + Issues: []int{5260}, + }, + Regexp: re(`Error setting up host only network on machine start.*Unspecified error`), + }, + { + Kind: Kind{ + ID: "IF_SSH_TIMEOUT", + ExitCode: ExLocalNetworkTimeout, + Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", + Issues: []int{4617}, + }, + Regexp: re(`waiting for SSH to be available`), + }, +} + +// internetIssues are internet related problems. +var internetIssues = []match{ + { + Kind: Kind{ + ID: "INET_GCR_UNAVAILABLE", + ExitCode: ExInternetUnavailable, + Advice: "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.", + URL: proxyDoc, + Issues: []int{3860}, + }, + Regexp: re(`gcr.io.*443: connect: invalid argument`), + }, + { + Kind: Kind{ + ID: "INET_RESET_BY_PEER", + ExitCode: ExInternetUnavailable, + Advice: "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.", + URL: proxyDoc, + Issues: []int{3909}, + }, + Regexp: re(`Error downloading .*connection reset by peer`), + }, + { + Kind: Kind{ + ID: "INET_DOWNLOAD_TIMEOUT", + ExitCode: ExInternetTimeout, + Advice: "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.", + URL: proxyDoc, + Issues: []int{3846}, + }, + Regexp: re(`Error downloading .*timeout`), + }, + { + Kind: Kind{ + ID: "INET_TLS_OVERSIZED", + ExitCode: ExInternetConflict, + Advice: "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.", + URL: proxyDoc, + Issues: []int{3857, 3759, 4252}, + }, + Regexp: re(`tls: oversized record received with length`), + }, + { + Kind: Kind{ + ID: "INET_DOWNLOAD_BLOCKED", + ExitCode: ExInternetTimeout, + Advice: "A firewall is likely blocking minikube from reaching the internet. 
You may need to configure minikube to use a proxy.", + URL: proxyDoc, + Issues: []int{3922, 6109, 6123}, + }, + Regexp: re(`iso: failed to download|download.*host has failed to respond`), + }, + { + Kind: Kind{ + ID: "INET_PULL_TIMEOUT", + ExitCode: ExInternetTimeout, + Advice: "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.", + URL: proxyDoc, + Issues: []int{3898, 6070}, + }, + Regexp: re(`ImagePull.*Timeout exceeded while awaiting headers`), + }, + { + Kind: Kind{ + ID: "INET_LOOKUP_HOST", + ExitCode: ExInternetConfig, + Advice: "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.", + URL: proxyDoc, + }, + Regexp: re(`dial tcp: lookup.*: no such host`), + }, + { + Kind: Kind{ + ID: "INET_PROXY_CONFUSION", + ExitCode: ExInternetConfig, + Advice: "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy", + Issues: []int{6107}, + URL: proxyDoc, + }, + Regexp: re(`http: server gave HTTP response to HTTPS client`), + }, + { + Kind: Kind{ + ID: "INET_NOT_TLS", + ExitCode: ExInternetConfig, + Advice: "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy", + Issues: []int{7286}, + URL: proxyDoc, + }, + Regexp: re(`tls: first record does not look like a TLS handshake`), + }, + { + Kind: Kind{ + ID: "INET_PROXY_503", + ExitCode: ExInternetConfig, + Advice: "Confirm that you have a working internet connection and that your VM has not run out of resources by using: 'minikube logs'", + Issues: []int{4749}, + }, + Regexp: re(`proxy.*unexpected response code: 503`), + }, + { + Kind: Kind{ + ID: "INET_DEFAULT_ROUTE", + ExitCode: ExInternetNotFound, + Advice: "Configure a default route on this Linux host, or use another --driver that does not require it", + Issues: []int{6083, 5636}, + }, + Regexp: re(`(No|from) default routes`), + GOOS: []string{"linux"}, + }, +} + +var guestIssues = 
[]match{ + { + Kind: Kind{ + ID: "GUEST_KVM2_NO_DOMAIN", + ExitCode: ExGuestNotFound, + Advice: "The VM that minikube is configured for no longer exists. Run 'minikube delete'", + Issues: []int{3636}, + }, + Regexp: re(`no domain with matching name`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "GUEST_PORT_IN_USE", + ExitCode: ExGuestConflict, + Advice: "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p to find the process and kill it", + Issues: []int{5484}, + }, + Regexp: re(`ERROR Port-.*is in use`), + GOOS: []string{"linux"}, + }, + + { + Kind: Kind{ + ID: "GUEST_DOES_NOT_EXIST", + ExitCode: ExGuestNotFound, + Advice: "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with", + Issues: []int{3864, 6087}, + }, + Regexp: re(`machine does not exist`), + }, + { + Kind: Kind{ + ID: "GUEST_NOT_FOUND", + ExitCode: ExGuestNotFound, + Advice: "Your minikube vm is not running, try minikube start.", + Issues: []int{4889}, + }, + Regexp: re(`Machine does not exist for api.Exists`), + }, + { + Kind: Kind{ + ID: "GUEST_IP_NOT_FOUND", + ExitCode: ExGuestNotRunning, + Advice: "The minikube VM is offline. Please run 'minikube start' to start it again.", + Issues: []int{3849, 3648}, + }, + Regexp: re(`Error getting ssh host name for driver: IP not found`), + }, + { + Kind: Kind{ + ID: "GUEST_UNSIGNED_CERT", + ExitCode: ExGuestConfig, + Advice: "Try 'minikube delete' to force new SSL certificates to be installed", + Issues: []int{6596}, + }, + Regexp: re(`not signed by CA certificate ca: crypto/rsa: verification error`), + }, + { + Kind: Kind{ + ID: "GUEST_VBOX_NO_VM", + ExitCode: ExGuestNotFound, + Advice: "The VM that minikube is configured for no longer exists. 
Run 'minikube delete'", + Issues: []int{4694}, + }, + Regexp: re(`Could not find a registered machine named`), + }, + { + Kind: Kind{ + ID: "GUEST_FILE_IN_USE", + ExitCode: ExGuestConflict, + Advice: "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager", + URL: "https://docs.docker.com/machine/drivers/hyper-v/", + Issues: []int{7300}, + }, + Regexp: re(`The process cannot access the file because it is being used by another process`), + GOOS: []string{"windows"}, + }, +} + +// runtimeIssues are container runtime issues (containerd, docker, etc) +var runtimeIssues = []match{ + { + Kind: Kind{ + ID: "RT_DOCKER_RESTART", + ExitCode: ExRuntimeError, + Advice: "Remove the incompatible --docker-opt flag if one was provided", + Issues: []int{7070}, + }, + Regexp: re(`systemctl -f restart docker`), + }, + { + Kind: Kind{ + ID: "RT_DOCKER_UNAVAILABLE", + ExitCode: ExRuntimeUnavailable, + Advice: "Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.", + Issues: []int{3952}, + }, + Regexp: re(`Error configuring auth on host: OS type not recognized`), + }, + { + Kind: Kind{ + ID: "RT_DOCKER_EXIT_1", + ExitCode: ExRuntimeNotFound, + Advice: "Either systemctl is not installed, or Docker is broken. Run 'sudo systemctl start docker' and 'journalctl -u docker'", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", + Issues: []int{2704, 4498}, + }, + Regexp: re(`sudo systemctl start docker: exit status 1`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "RT_DOCKER_EXIT_5", + ExitCode: ExRuntimeUnavailable, + Advice: "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. 
Alternatively, select another value for --driver", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", + Issues: []int{5532}, + }, + Regexp: re(`sudo systemctl start docker: exit status 5`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "RT_CRIO_EXIT_5", + ExitCode: ExRuntimeUnavailable, + Advice: "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker", + URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/none", + Issues: []int{5532}, + }, + Regexp: re(`sudo systemctl restart crio: exit status 5`), + GOOS: []string{"linux"}, + }, +} + +// controlPlaneIssues are Kubernetes deployment issues +var controlPlaneIssues = []match{ + { + Kind: Kind{ + ID: "K8S_APISERVER_MISSING", + ExitCode: ExControlPlaneNotFound, + Advice: "Check that the provided apiserver flags are valid, and that SELinux is disabled", + Issues: []int{4536, 6014}, + }, + Regexp: re(`apiserver process never appeared`), + }, + { + Kind: Kind{ + ID: "K8S_APISERVER_TIMEOUT", + ExitCode: ExControlPlaneTimeout, + Advice: "A VPN or firewall is interfering with HTTP access to the minikube VM. 
Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/", + URL: vpnDoc, + Issues: []int{4302}, + }, + Regexp: re(`apiserver: timed out waiting for the condition`), + }, + { + Kind: Kind{ + ID: "K8S_DNS_TIMEOUT", + ExitCode: ExControlPlaneTimeout, + Advice: "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict", + URL: vpnDoc, + }, + Regexp: re(`dns: timed out waiting for the condition`), + }, + { + Kind: Kind{ + ID: "K8S_KUBELET_NOT_RUNNING", + ExitCode: ExControlPlaneUnavailable, + Advice: "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start", + Issues: []int{4172}, + }, + Regexp: re(`The kubelet is not running|kubelet isn't running`), + GOOS: []string{"linux"}, + }, + { + Kind: Kind{ + ID: "K8S_INVALID_DNS_DOMAIN", + ExitCode: ExControlPlaneConfig, + Advice: "Select a valid value for --dnsdomain", + }, + Regexp: re(`dnsDomain: Invalid`), + }, +} + +// serviceIssues are issues with services running on top of Kubernetes +var serviceIssues = []match{ + { + Kind: Kind{ + ID: "SVC_ENDPOINT_NOT_FOUND", + ExitCode: ExSvcNotFound, + Advice: "Please make sure the service you are looking for is deployed or is in the correct namespace.", + Issues: []int{4599}, + }, + Regexp: re(`Could not find finalized endpoint being pointed to by`), + }, + { + Kind: Kind{ + ID: "SVC_OPEN_NOT_FOUND", + ExitCode: ExSvcNotFound, + Advice: "Use 'kubect get po -A' to find the correct and namespace name", + Issues: []int{5836}, + }, + Regexp: re(`Error opening service.*not found`), + }, + { + Kind: Kind{ + ID: "SVC_DASHBOARD_ROLE_REF", + ExitCode: ExSvcPermission, + Advice: "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'", + Issues: []int{7256}, + }, + Regexp: re(`dashboard.*cannot change roleRef`), + }, +} diff --git a/pkg/minikube/reason/match.go b/pkg/minikube/reason/match.go new file mode 100644 index 0000000000..8abac44c30 --- /dev/null +++ 
b/pkg/minikube/reason/match.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reason + +import ( + "regexp" + + "github.com/golang/glog" +) + +// match matches a known issue within minikube +type match struct { + // Inherit ID, ExitCode, and Style from reason.Kind + Kind + + // Regexp is which regular expression this issue matches + Regexp *regexp.Regexp + // Operating systems this error is specific to + GOOS []string +} + +func knownIssues() []match { + ps := []match{} + // This is intentionally in dependency order + ps = append(ps, programIssues...) + ps = append(ps, resourceIssues...) + ps = append(ps, hostIssues...) + ps = append(ps, providerIssues...) + ps = append(ps, driverIssues...) + ps = append(ps, localNetworkIssues...) + ps = append(ps, internetIssues...) + ps = append(ps, guestIssues...) + ps = append(ps, runtimeIssues...) + ps = append(ps, controlPlaneIssues...) + ps = append(ps, serviceIssues...) 
+ return ps +} + +// MatchKnownIssue returns a known issue from an error on an OS +func MatchKnownIssue(r Kind, err error, goos string) *Kind { + // The kind passed in has specified that it should not be rematched + if r.NoMatch { + return nil + } + + var genericMatch *Kind + + for _, ki := range knownIssues() { + ki := ki + if ki.Regexp == nil { + glog.Errorf("known issue has no regexp: %+v", ki) + continue + } + + if !ki.Regexp.MatchString(err.Error()) { + continue + } + + // Does this match require an OS matchup? + if len(ki.GOOS) > 0 { + for _, o := range ki.GOOS { + if o == goos { + return &ki.Kind + } + } + } + if genericMatch == nil { + genericMatch = &ki.Kind + } + } + + return genericMatch +} diff --git a/pkg/minikube/reason/match_test.go b/pkg/minikube/reason/match_test.go new file mode 100644 index 0000000000..9ac4fd7bdc --- /dev/null +++ b/pkg/minikube/reason/match_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reason + +import ( + "fmt" + "testing" +) + +func TestFromError(t *testing.T) { + tests := []struct { + issue int + os string + want string + err string + }{ + {0, "", "", "this is just a lame error message with no matches."}, + {2991, "linux", "PR_KVM_CAPABILITIES", "Unable to start VM: create: Error creating machine: Error in driver during machine creation: creating domain: Error defining domain xml:\n\n: virError(Code=8, Domain=44, Message='invalid argument: could not find capabilities for domaintype=kvm ')"}, + {3594, "", "IF_HOST_CIDR_CONFLICT", "Error starting host: Error starting stopped host: Error setting up host only network on machine start: host-only cidr conflicts with the network address of a host interface."}, + {3614, "", "IF_VBOX_NOT_VISIBLE", "Error starting host: Error starting stopped host: Error setting up host only network on machine start: The host-only adapter we just created is not visible. This is a well known VirtualBox bug. You might want to uninstall it and reinstall at least version 5.0.12 that is supposed to fix this issue"}, + {3784, "", "PR_VBOXMANAGE_NOT_FOUND", "create: precreate: VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path"}, + {3849, "", "GUEST_IP_NOT_FOUND", "bootstrapper: Error creating new ssh host from driver: Error getting ssh host name for driver: IP not found"}, + {3859, "windows", "PR_VBOX_HARDENING", `Unable to start VM: create: creating: Unable to start the VM: C:\Program Files\Oracle\VirtualBox\VBoxManage.exe startvm minikube --type headless failed: +VBoxManage.exe: error: The virtual machine 'minikube' has terminated unexpectedly during startup with exit code -1073741819 (0xc0000005). 
More details may be available in 'C:\Users\pabitra_b.minikube\machines\minikube\minikube\Logs\VBoxHardening.log' +VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component MachineWrap, interface IMachine`}, + {3922, "", "INET_DOWNLOAD_BLOCKED", `unable to cache ISO: https://storage.googleapis.com/minikube/iso/minikube-v0.35.0.iso: failed to download: failed to download to temp file: download failed: 5 error(s) occurred: +* Temporary download error: Get https://storage.googleapis.com/minikube/iso/minikube-v0.35.0.iso: dial tcp 216.58.207.144:443: connectex: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.`}, + {4107, "darwin", "PR_VBOX_BLOCKED", "Result Code: NS_ERROR (0x80004005)"}, + {4302, "", "K8S_APISERVER_TIMEOUT", "apiserver: timed out waiting for the condition"}, + {4252, "", "INET_TLS_OVERSIZED", "Failed to update cluster: downloading binaries: downloading kubeadm: Error downloading kubeadm v1.14.1: failed to download: failed to download to temp file: download failed: 5 error(s) occurred:\n\nTemporary download error: Get https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubeadm: proxyconnect tcp: tls: oversized record received with length 20527"}, + {4222, "", "IF_VBOX_NOT_VISIBLE", "Unable to start VM: create: creating: Error setting up host only network on machine start: The host-only adapter we just created is not visible. This is a well known VirtualBox bug. You might want to uninstall it and reinstall at least version 5.0.12 that is supposed to fix this issue"}, + {6014, "linux", "K8S_APISERVER_MISSING", "Error restarting cluster: waiting for apiserver: apiserver process never appeared"}, + {5836, "", "SVC_OPEN_NOT_FOUND", `Error opening service: Service newservice was not found in "unknown" namespace. 
You may select another namespace by using 'minikube service newservice -n : Temporary Error: Error getting service newservice: services "newservice" not found`}, + {6087, "", "GUEST_DOES_NOT_EXIST", `Error getting machine status: state: machine does not exist`}, + {5714, "darwin", "HOST_KUBECONFIG_PERMISSION", `Failed to setup kubeconfig: writing kubeconfig: Error writing file /Users/matthewgleich/.kube/config: error writing file /Users/matthewgleich/.kube/config: open /Users/matthewgleich/.kube/config: permission denied`}, + {5532, "linux", "RT_DOCKER_EXIT_5", `Failed to enable container runtime: running command: sudo systemctl start docker: exit status 5`}, + {5532, "linux", "RT_CRIO_EXIT_5", `Failed to enable container runtime: running command: sudo systemctl restart crio: exit status 5`}, + {5484, "linux", "GUEST_PORT_IN_USE", `[ERROR Port-10252]: Port 10252 is in use`}, + {4913, "linux", "PR_KVM_CREATE_BUSY", `Unable to start VM: create: Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: process exited while connecting to monitor: ioctl(KVM_CREATE_VM) failed: 16 Device or resource busy`}, + {5950, "linux", "PR_KVM_ISO_PERMISSION", `Retriable failure: create: Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: qemu unexpectedly closed the monitor: 2019-11-19T16:08:16.757609Z qemu-kvm: -drive file=/home/lnicotra/.minikube/machines/minikube/boot2docker.iso,format=raw,if=none,id=drive-scsi0-0-0-2,readonly=on: could not open disk image /home/lnicotra/.minikube/machines/minikube/boot2docker.iso: Could not open '/home/lnicotra/.minikube/machines/minikube/boot2docker.iso': Permission denied'`}, + {5836, "", "SVC_OPEN_NOT_FOUND", `Error opening service: Service kubernetes-bootcamp was not found in "default" namespace. 
You may select another namespace by using 'minikube service kubernetes-bootcamp -n : Temporary Error: Error getting service kubernetes-bootcamp: services "kubernetes-bootcamp" not found`}, + {3898, "", "INET_PULL_TIMEOUT", `[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.17.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`}, + {6079, "darwin", "PR_HYPERKIT_CRASHED", `Error creating machine: Error in driver during machine creation: hyperkit crashed! command line:`}, + {5636, "linux", "INET_DEFAULT_ROUTE", `Unable to get VM IP address: unable to select an IP from default routes.`}, + {6087, "", "GUEST_DOES_NOT_EXIST", `Error getting host status: state: machine does not exist`}, + {6098, "windows", "PR_PRECREATE_EXIT_1", `Retriable failure: create: precreate: exit status 1`}, + {6107, "", "INET_PROXY_CONFUSION", `http: server gave HTTP response to HTTPS client`}, + {6109, "", "INET_DOWNLOAD_BLOCKED", `Failed to update cluster: downloading binaries: downloading kubelet: Error downloading kubelet v1.16.2: failed to download: failed to download to temp file: failed to copy contents: read tcp 192.168.0.106:61314->172.217.166.176:443: wsarecv: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.`}, + {6109, "", "INET_DOWNLOAD_BLOCKED", `Failed to update cluster: downloading binaries: downloading kubeadm: Error downloading kubeadm v1.17.0: failed to download: failed to download to temp file: failed to copy contents: read tcp [2606:a000:81c5:1e00:349a:26c0:7ea6:bbf1]:55317->[2607:f8b0:4004:815::2010]:443: wsarecv: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to 
respond.`}, + {4277, "linux", "PR_KVM_MSR", `Unable to start VM: start: Error creating VM: virError(Code=1, Domain=10, Message='internal error: qemu unexpectedly closed the monitor: 2019-05-17T02:20:07.980140Z qemu-system-x86_64: error: failed to set MSR 0x38d to 0x0 qemu-system-x86_64: /build/qemu-lXHhGe/qemu-2.11+dfsg/target/i386/kvm.c:1807: kvm_put_msrs: Assertion ret == cpu->kvm_msr_buf->nmsrs failed.`}, + } + for _, tc := range tests { + t.Run(tc.want, func(t *testing.T) { + got := MatchKnownIssue(Kind{}, fmt.Errorf(tc.err), tc.os) + if got == nil { + if tc.want != "" { + t.Errorf("FromError(%q)=nil, want %s", tc.err, tc.want) + } + return + } + if got.ID != tc.want { + t.Errorf("FromError(%q)=%s, want %s", tc.err, got.ID, tc.want) + } + + found := false + for _, i := range got.Issues { + if i == tc.issue { + found = true + } + } + if !found { + t.Errorf("Issue %d is not listed in %+v", tc.issue, got.Issues) + } + }) + } +} diff --git a/pkg/minikube/reason/reason.go b/pkg/minikube/reason/reason.go new file mode 100644 index 0000000000..c5a96d9932 --- /dev/null +++ b/pkg/minikube/reason/reason.go @@ -0,0 +1,286 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reason + +import ( + "fmt" + + "k8s.io/minikube/pkg/minikube/style" +) + +const issueBase = "https://github.com/kubernetes/minikube/issues" + +// Kind describes reason metadata +type Kind struct { + // ID is a unique and stable string describing a reason + ID string + // ExitCode to be used (defaults to 1) + ExitCode int + // Style is what emoji prefix to use for this reason + Style style.Enum + + // Advice is actionable text that the user should follow + Advice string + // URL is a reference URL for more information + URL string + // Issues are a list of related issues to this issue + Issues []int + // Show the new issue link + NewIssueLink bool + // Do not attempt to match this reason to a specific known issue + NoMatch bool +} + +func (k *Kind) IssueURLs() []string { + is := []string{} + for _, i := range k.Issues { + is = append(is, fmt.Sprintf("%s/%d", issueBase, i)) + } + return is +} + +// Sections are ordered roughly by stack dependencies +var ( + Usage = Kind{ID: "MK_USAGE", ExitCode: ExProgramUsage} + Interrupted = Kind{ID: "MK_INTERRUPTED", ExitCode: ExProgramConflict} + + NewAPIClient = Kind{ID: "MK_NEW_APICLIENT", ExitCode: ExProgramError} + InternalAddonEnable = Kind{ID: "MK_ADDON_ENABLE", ExitCode: ExProgramError} + InternalAddConfig = Kind{ID: "MK_ADD_CONFIG", ExitCode: ExProgramError} + InternalBindFlags = Kind{ID: "MK_BIND_FLAGS", ExitCode: ExProgramError} + InternalBootstrapper = Kind{ID: "MK_BOOTSTRAPPER", ExitCode: ExProgramError} + InternalCacheList = Kind{ID: "MK_CACHE_LIST", ExitCode: ExProgramError} + 
InternalCacheLoad = Kind{ID: "MK_CACHE_LOAD", ExitCode: ExProgramError} + InternalCommandRunner = Kind{ID: "MK_COMMAND_RUNNER", ExitCode: ExProgramError} + InternalCompletion = Kind{ID: "MK_COMPLETION", ExitCode: ExProgramError} + InternalConfigSet = Kind{ID: "MK_CONFIG_SET", ExitCode: ExProgramError} + InternalConfigUnset = Kind{ID: "MK_CONFIG_UNSET", ExitCode: ExProgramError} + InternalConfigView = Kind{ID: "MK_CONFIG_VIEW", ExitCode: ExProgramError} + InternalDelConfig = Kind{ID: "MK_DEL_CONFIG", ExitCode: ExProgramError} + InternalDisable = Kind{ID: "MK_DISABLE", ExitCode: ExProgramError} + InternalDockerScript = Kind{ID: "MK_DOCKER_SCRIPT", ExitCode: ExProgramError} + InternalEnable = Kind{ID: "MK_ENABLE", ExitCode: ExProgramError} + InternalFlagsBind = Kind{ID: "MK_FLAGS_BIND", ExitCode: ExProgramError} + InternalFlagSet = Kind{ID: "MK_FLAGS_SET", ExitCode: ExProgramError} + InternalFormatUsage = Kind{ID: "MK_FORMAT_USAGE", ExitCode: ExProgramError} + InternalGenerateDocs = Kind{ID: "MK_GENERATE_DOCS", ExitCode: ExProgramError} + InternalJSONMarshal = Kind{ID: "MK_JSON_MARSHAL", ExitCode: ExProgramError} + InternalKubernetesClient = Kind{ID: "MK_K8S_CLIENT", ExitCode: ExControlPlaneUnavailable} + InternalListConfig = Kind{ID: "MK_LIST_CONFIG", ExitCode: ExProgramError} + InternalLogtostderrFlag = Kind{ID: "MK_LOGTOSTDERR_FLAG", ExitCode: ExProgramError} + InternalLogFollow = Kind{ID: "MK_LOG_FOLLOW", ExitCode: ExProgramError} + InternalNewRuntime = Kind{ID: "MK_NEW_RUNTIME", ExitCode: ExProgramError} + InternalOutputUsage = Kind{ID: "MK_OUTPUT_USAGE", ExitCode: ExProgramError} + InternalRuntime = Kind{ID: "MK_RUNTIME", ExitCode: ExProgramError} + InternalReservedProfile = Kind{ID: "MK_RESERVED_PROFILE", ExitCode: ExProgramConflict} + InternalEnvScript = Kind{ID: "MK_ENV_SCRIPT", ExitCode: ExProgramError} + InternalShellDetect = Kind{ID: "MK_SHELL_DETECT", ExitCode: ExProgramError} + InternalStatusJSON = Kind{ID: "MK_STATUS_JSON", ExitCode: ExProgramError} + 
InternalStatusText = Kind{ID: "MK_STATUS_TEXT", ExitCode: ExProgramError} + InternalUnsetScript = Kind{ID: "MK_UNSET_SCRIPT", ExitCode: ExProgramError} + InternalViewExec = Kind{ID: "MK_VIEW_EXEC", ExitCode: ExProgramError} + InternalViewTmpl = Kind{ID: "MK_VIEW_TMPL", ExitCode: ExProgramError} + InternalYamlMarshal = Kind{ID: "MK_YAML_MARSHAL", ExitCode: ExProgramError} + InternalCredsNotFound = Kind{ID: "MK_CREDENTIALS_NOT_FOUND", ExitCode: ExProgramNotFound, Style: style.Shrug} + InternalSemverParse = Kind{ID: "MK_SEMVER_PARSE", ExitCode: ExProgramError} + + RsrcInsufficientCores = Kind{ID: "RSRC_INSUFFICIENT_CORES", ExitCode: ExInsufficientCores, Style: style.UnmetRequirement} + RsrcInsufficientDarwinDockerCores = Kind{ + ID: "RSRC_DOCKER_CORES", + ExitCode: ExInsufficientCores, + Advice: `1. Click on "Docker for Desktop" menu icon + 2. Click "Preferences" + 3. Click "Resources" + 4. Increase "CPUs" slider bar to 2 or higher + 5. Click "Apply & Restart"`, + Style: style.UnmetRequirement, + URL: "https://docs.docker.com/docker-for-mac/#resources", + } + + RsrcInsufficientWindowsDockerCores = Kind{ + ID: "RSRC_DOCKER_CORES", + ExitCode: ExInsufficientCores, + Advice: `1. Open the "Docker Desktop" menu by clicking the Docker icon in the system tray + 2. Click "Settings" + 3. Click "Resources" + 4. Increase "CPUs" slider bar to 2 or higher + 5. 
Click "Apply & Restart"`, + URL: "https://docs.docker.com/docker-for-windows/#resources", + Style: style.UnmetRequirement, + } + + RsrcInsufficientReqMemory = Kind{ID: "RSRC_INSUFFICIENT_REQ_MEMORY", ExitCode: ExInsufficientMemory, Style: style.UnmetRequirement} + RsrcInsufficientSysMemory = Kind{ID: "RSRC_INSUFFICIENT_SYS_MEMORY", ExitCode: ExInsufficientMemory, Style: style.UnmetRequirement} + RsrcInsufficientContainerMemory = Kind{ID: "RSRC_INSUFFICIENT_CONTAINER_MEMORY", ExitCode: ExInsufficientMemory, Style: style.UnmetRequirement} + RsrcInsufficientWindowsDockerMemory = Kind{ + ID: "RSRC_DOCKER_MEMORY", + ExitCode: ExInsufficientMemory, + Advice: `1. Open the "Docker Desktop" menu by clicking the Docker icon in the system tray + 2. Click "Settings" + 3. Click "Resources" + 4. Increase "Memory" slider bar to {{.recommend}} or higher + 5. Click "Apply & Restart"`, + URL: "https://docs.docker.com/docker-for-windows/#resources", + Style: style.UnmetRequirement, + } + RsrcInsufficientDarwinDockerMemory = Kind{ + ID: "RSRC_DOCKER_MEMORY", + ExitCode: ExInsufficientMemory, + Advice: `1. Click on "Docker for Desktop" menu icon + 2. Click "Preferences" + 3. Click "Resources" + 4. Increase "Memory" slider bar to {{.recommend}} or higher + 5. Click "Apply & Restart"`, + Style: style.UnmetRequirement, + URL: "https://docs.docker.com/docker-for-mac/#resources", + } + + RsrcInsufficientDockerStorage = Kind{ + ID: "RSRC_DOCKER_STORAGE", + ExitCode: ExInsufficientStorage, + Advice: `Try at least one of the following to free up space on the device: + + 1. Run "docker system prune" to remove unused docker data + 2. Increase the amount of memory allocated to Docker for Desktop via + Docker icon > Preferences > Resources > Disk Image Size + 3. 
Run "minikube ssh -- docker system prune" if using the docker container runtime`, + Issues: []int{9024}, + } + + RsrcInsufficientStorage = Kind{ID: "RSRC_INSUFFICIENT_STORAGE", ExitCode: ExInsufficientStorage, Style: style.UnmetRequirement} + + HostHomeMkdir = Kind{ID: "HOST_HOME_MKDIR", ExitCode: ExHostPermission} + HostHomeChown = Kind{ID: "HOST_HOME_CHOWN", ExitCode: ExHostPermission} + HostBrowser = Kind{ID: "HOST_BROWSER", ExitCode: ExHostError} + HostConfigLoad = Kind{ID: "HOST_CONFIG_LOAD", ExitCode: ExHostConfig} + HostCurrentUser = Kind{ID: "HOST_CURRENT_USER", ExitCode: ExHostConfig} + HostDelCache = Kind{ID: "HOST_DEL_CACHE", ExitCode: ExHostError} + HostKillMountProc = Kind{ID: "HOST_KILL_MOUNT_PROC", ExitCode: ExHostError} + HostKubeconfigUnset = Kind{ID: "HOST_KUBECNOFIG_UNSET", ExitCode: ExHostConfig} + HostKubeconfigUpdate = Kind{ID: "HOST_KUBECONFIG_UPDATE", ExitCode: ExHostConfig} + HostKubectlProxy = Kind{ID: "HOST_KUBECTL_PROXY", ExitCode: ExHostError} + HostMountPid = Kind{ID: "HOST_MOUNT_PID", ExitCode: ExHostError} + HostPathMissing = Kind{ID: "HOST_PATH_MISSING", ExitCode: ExHostNotFound} + HostPathStat = Kind{ID: "HOST_PATH_STAT", ExitCode: ExHostError} + HostPurge = Kind{ID: "HOST_PURGE", ExitCode: ExHostError} + HostSaveProfile = Kind{ID: "HOST_SAVE_PROFILE", ExitCode: ExHostConfig} + + ProviderNotFound = Kind{ID: "PROVIDER_NOT_FOUND", ExitCode: ExProviderNotFound} + ProviderUnavailable = Kind{ID: "PROVIDER_UNAVAILABLE", ExitCode: ExProviderNotFound, Style: style.Shrug} + + DrvCPEndpoint = Kind{ID: "DRV_CP_ENDPOINT", ExitCode: ExDriverError} + DrvPortForward = Kind{ID: "DRV_PORT_FORWARD", ExitCode: ExDriverError} + DrvUnsupportedMulti = Kind{ID: "DRV_UNSUPPORTED_MULTINODE", ExitCode: ExDriverConflict} + DrvUnsupportedOS = Kind{ID: "DRV_UNSUPPORTED_OS", ExitCode: ExDriverUnsupported} + DrvUnsupportedProfile = Kind{ID: "DRV_UNSUPPORTED_PROFILE", ExitCode: ExDriverUnsupported} + DrvNotFound = Kind{ID: "DRV_NOT_FOUND", ExitCode: 
ExDriverNotFound} + DrvNotDetected = Kind{ID: "DRV_NOT_DETECTED", ExitCode: ExDriverNotFound} + DrvAsRoot = Kind{ID: "DRV_AS_ROOT", ExitCode: ExDriverPermission} + DrvNeedsRoot = Kind{ID: "DRV_NEEDS_ROOT", ExitCode: ExDriverPermission} + + GuestCacheLoad = Kind{ID: "GUEST_CACHE_LOAD", ExitCode: ExGuestError} + GuestCert = Kind{ID: "GUEST_CERT", ExitCode: ExGuestError} + GuestCpConfig = Kind{ID: "GUEST_CP_CONFIG", ExitCode: ExGuestConfig} + GuestDeletion = Kind{ID: "GUEST_DELETION", ExitCode: ExGuestError} + GuestLoadHost = Kind{ID: "GUEST_LOAD_HOST", ExitCode: ExGuestError} + GuestMount = Kind{ID: "GUEST_MOUNT", ExitCode: ExGuestError} + GuestNodeAdd = Kind{ID: "GUEST_NODE_ADD", ExitCode: ExGuestError} + GuestNodeDelete = Kind{ID: "GUEST_NODE_DELETE", ExitCode: ExGuestError} + GuestNodeProvision = Kind{ID: "GUEST_NODE_PROVISION", ExitCode: ExGuestError} + GuestNodeRetrieve = Kind{ID: "GUEST_NODE_RETRIEVE", ExitCode: ExGuestNotFound} + GuestNodeStart = Kind{ID: "GUEST_NODE_START", ExitCode: ExGuestError} + GuestPause = Kind{ID: "GUEST_PAUSE", ExitCode: ExGuestError} + GuestProfileDeletion = Kind{ID: "GUEST_PROFILE_DELETION", ExitCode: ExGuestError} + GuestProvision = Kind{ID: "GUEST_PROVISION", ExitCode: ExGuestError} + GuestStart = Kind{ID: "GUEST_START", ExitCode: ExGuestError} + GuestStatus = Kind{ID: "GUEST_STATUS", ExitCode: ExGuestError} + GuestStopTimeout = Kind{ID: "GUEST_STOP_TIMEOUT", ExitCode: ExGuestTimeout} + GuestUnpause = Kind{ID: "GUEST_UNPAUSE", ExitCode: ExGuestError} + GuestDrvMismatch = Kind{ID: "GUEST_DRIVER_MISMATCH", ExitCode: ExGuestConflict, Style: style.Conflict} + GuestMissingConntrack = Kind{ID: "GUEST_MISSING_CONNTRACK", ExitCode: ExGuestUnsupported} + + IfHostIP = Kind{ID: "IF_HOST_IP", ExitCode: ExLocalNetworkError} + IfMountIP = Kind{ID: "IF_MOUNT_IP", ExitCode: ExLocalNetworkError} + IfMountPort = Kind{ID: "IF_MOUNT_PORT", ExitCode: ExLocalNetworkError} + IfSSHClient = Kind{ID: "IF_SSH_CLIENT", ExitCode: ExLocalNetworkError} + + 
InetCacheBinaries = Kind{ID: "INET_CACHE_BINARIES", ExitCode: ExInternetError} + InetCacheKubectl = Kind{ID: "INET_CACHE_KUBECTL", ExitCode: ExInternetError} + InetCacheTar = Kind{ID: "INET_CACHE_TAR", ExitCode: ExInternetError} + InetGetVersions = Kind{ID: "INET_GET_VERSIONS", ExitCode: ExInternetError} + InetRepo = Kind{ID: "INET_REPO", ExitCode: ExInternetError} + InetReposUnavailable = Kind{ID: "INET_REPOS_UNAVAILABLE", ExitCode: ExInternetError} + InetVersionUnavailable = Kind{ID: "INET_VERSION_UNAVAILABLE", ExitCode: ExInternetUnavailable} + InetVersionEmpty = Kind{ID: "INET_VERSION_EMPTY", ExitCode: ExInternetConfig} + + RuntimeEnable = Kind{ID: "RUNTIME_ENABLE", ExitCode: ExRuntimeError} + RuntimeCache = Kind{ID: "RUNTIME_CACHE", ExitCode: ExRuntimeError} + RuntimeRestart = Kind{ID: "RUNTIME_RESTART", ExitCode: ExRuntimeError} + + SvcCheckTimeout = Kind{ID: "SVC_CHECK_TIMEOUT", ExitCode: ExSvcTimeout} + SvcTimeout = Kind{ID: "SVC_TIMEOUT", ExitCode: ExSvcTimeout} + SvcList = Kind{ID: "SVC_LIST", ExitCode: ExSvcError} + SvcTunnelStart = Kind{ID: "SVC_TUNNEL_START", ExitCode: ExSvcError} + SvcTunnelStop = Kind{ID: "SVC_TUNNEL_STOP", ExitCode: ExSvcError} + SvcURLTimeout = Kind{ID: "SVC_URL_TIMEOUT", ExitCode: ExSvcTimeout} + SvcNotFound = Kind{ID: "SVC_NOT_FOUND", ExitCode: ExSvcNotFound} + + EnvDriverConflict = Kind{ID: "ENV_DRIVER_CONFLICT", ExitCode: ExDriverConflict} + EnvMultiConflict = Kind{ID: "ENV_MULTINODE_CONFLICT", ExitCode: ExGuestConflict} + EnvDockerUnavailable = Kind{ID: "ENV_DOCKER_UNAVAILABLE", ExitCode: ExRuntimeUnavailable} + EnvPodmanUnavailable = Kind{ID: "ENV_PODMAN_UNAVAILABLE", ExitCode: ExRuntimeUnavailable} + + AddonUnsupported = Kind{ID: "SVC_ADDON_UNSUPPORTED", ExitCode: ExSvcUnsupported} + AddonNotEnabled = Kind{ID: "SVC_ADDON_NOT_ENABLED", ExitCode: ExProgramConflict} + + KubernetesInstallFailed = Kind{ID: "K8S_INSTALL_FAILED", ExitCode: ExControlPlaneError} + KubernetesTooOld = Kind{ID: "K8S_OLD_UNSUPPORTED", ExitCode: 
ExControlPlaneUnsupported} + KubernetesDowngrade = Kind{ + ID: "K8S_DOWNGRADE_UNSUPPORTED", + ExitCode: ExControlPlaneUnsupported, + Advice: `1) Recreate the cluster with Kubernetes {{.new}}, by running: + + minikube delete{{.profile}} + minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}} + + 2) Create a second cluster with Kubernetes {{.new}}, by running: + + minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}} + + 3) Use the existing cluster at version Kubernetes {{.old}}, by running: + + minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}} + `, + Style: style.SeeNoEvil, + } +) diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index fbbf66cfe0..d2cfa572e2 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -49,10 +49,20 @@ func init() { } func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) { + mounts := make([]oci.Mount, len(cc.ContainerVolumeMounts)) + for i, spec := range cc.ContainerVolumeMounts { + var err error + mounts[i], err = oci.ParseMountString(spec) + if err != nil { + return nil, err + } + } + return kic.NewDriver(kic.Config{ MachineName: driver.MachineName(cc, n), StorePath: localpath.MiniPath(), ImageDigest: cc.KicBaseImage, + Mounts: mounts, CPU: cc.CPUs, Memory: cc.Memory, OCIBinary: oci.Docker, @@ -150,17 +160,17 @@ func checkOverlayMod() registry.State { // suggestFix matches a stderr with possible fix for the docker driver func suggestFix(stderr string, err error) registry.State { if strings.Contains(stderr, "permission denied") && runtime.GOOS == "linux" { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"} + return registry.State{Error: err, Installed: true, Running: true, 
Healthy: false, Fix: "Add your user to the 'docker' group: 'sudo usermod -aG docker $USER && newgrp docker'", Doc: "https://docs.docker.com/engine/install/linux-postinstall/"} } if strings.Contains(stderr, "/pipe/docker_engine: The system cannot find the file specified.") && runtime.GOOS == "windows" { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"} + return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"} } if strings.Contains(stderr, "Cannot connect") || strings.Contains(stderr, "refused") || strings.Contains(stderr, "Is the docker daemon running") || strings.Contains(stderr, "docker daemon is not running") { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Start the Docker service", Doc: docURL} + return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Start the Docker service", Doc: docURL} } // We don't have good advice, but at least we can provide a good error message - return registry.State{Error: err, Installed: true, Healthy: false, Doc: docURL} + return registry.State{Error: err, Installed: true, Running: true, Healthy: false, Doc: docURL} } diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 43ce54b6b6..1e3dc9c7b1 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -96,7 +96,7 @@ func status() registry.State { cmd := exec.CommandContext(ctx, path, "-v") out, err := cmd.CombinedOutput() if err != nil { 
- return registry.State{Installed: true, Error: fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), out), Fix: "Run 'brew install hyperkit'", Doc: docURL} + return registry.State{Installed: true, Running: false, Error: fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), out), Fix: "Run 'brew install hyperkit'", Doc: docURL} } // Split version from v0.YYYYMMDD-HH-xxxxxxx or 0.YYYYMMDD to YYYYMMDD @@ -105,13 +105,13 @@ func status() registry.State { // If current hyperkit is not newer than minimumVersion, suggest upgrade information isNew, err := isNewerVersion(currentVersion, specificVersion) if err != nil { - return registry.State{Installed: true, Healthy: true, Error: fmt.Errorf("hyperkit version check failed:\n%v", err), Doc: docURL} + return registry.State{Installed: true, Running: true, Healthy: true, Error: fmt.Errorf("hyperkit version check failed:\n%v", err), Doc: docURL} } if !isNew { - return registry.State{Installed: true, Healthy: true, Error: fmt.Errorf("the installed hyperkit version (0.%s) is older than the minimum recommended version (%s)", currentVersion, minimumVersion), Fix: "Run 'brew upgrade hyperkit'", Doc: docURL} + return registry.State{Installed: true, Running: true, Healthy: true, Error: fmt.Errorf("the installed hyperkit version (0.%s) is older than the minimum recommended version (%s)", currentVersion, minimumVersion), Fix: "Run 'brew upgrade hyperkit'", Doc: docURL} } - return registry.State{Installed: true, Healthy: true} + return registry.State{Installed: true, Running: true, Healthy: true} } // isNewerVersion checks whether current hyperkit is newer than specific version diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 8c2b534309..c12f2d826d 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -95,14 +95,14 @@ func status() registry.State { if err != nil { errorMessage := fmt.Errorf("%s failed:\n%s", 
strings.Join(cmd.Args, " "), out) fixMessage := "Start PowerShell as an Administrator" - return registry.State{Installed: false, Error: errorMessage, Fix: fixMessage, Doc: docURL} + return registry.State{Installed: false, Running: true, Error: errorMessage, Fix: fixMessage, Doc: docURL} } // Get-Wmiobject does not return an error code for false if strings.TrimSpace(string(out)) != "True" { errorMessage := fmt.Errorf("%s returned %q", strings.Join(cmd.Args, " "), out) fixMessage := "Enable Hyper-V: Start PowerShell as Administrator, and run: 'Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All'" - return registry.State{Installed: false, Error: errorMessage, Fix: fixMessage, Doc: docURL} + return registry.State{Installed: false, Running: false, Error: errorMessage, Fix: fixMessage, Doc: docURL} } return registry.State{Installed: true, Healthy: true} diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index fae105572d..a1c1f4442a 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -117,6 +117,7 @@ func status() registry.State { if err != nil { return registry.State{ Installed: true, + Running: true, Error: fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), strings.TrimSpace(string(out))), Fix: "Follow your Linux distribution instructions for configuring KVM", Doc: docURL, @@ -129,6 +130,7 @@ func status() registry.State { if err != nil { return registry.State{ Installed: true, + Running: true, Error: fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), strings.TrimSpace(string(out))), Fix: "Check that libvirtd is properly installed and that you are a member of the appropriate libvirt group", Doc: docURL, diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index ec8cea71d8..7afae87987 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -54,16 +54,16 @@ 
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) { func status() registry.State { _, err := exec.LookPath("iptables") if err != nil { - return registry.State{Error: err, Fix: "iptables must be installed", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + return registry.State{Running: true, Error: err, Fix: "iptables must be installed", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} } if _, err := exec.LookPath("docker"); err != nil { - return registry.State{Error: err, Installed: false, Fix: "Install docker", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + return registry.State{Running: true, Error: err, Installed: false, Fix: "Install docker", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} } u, err := user.Current() if err != nil { - return registry.State{Error: err, Healthy: false, Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + return registry.State{Running: true, Error: err, Healthy: false, Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} } if u.Uid != "0" { diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index 1f14bb653a..86acbd556c 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -59,5 +59,5 @@ func status() registry.State { if err != nil { return registry.State{Error: err, Fix: "Install docker-machine-driver-parallels", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/parallels/"} } - return registry.State{Installed: true, Healthy: true} + return registry.State{Installed: true, Healthy: true, Running: true} } diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index ad16ae4681..166fd9e6d5 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -63,10 +63,20 @@ func 
init() { } func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) { + mounts := make([]oci.Mount, len(cc.ContainerVolumeMounts)) + for i, spec := range cc.ContainerVolumeMounts { + var err error + mounts[i], err = oci.ParseMountString(spec) + if err != nil { + return nil, err + } + } + return kic.NewDriver(kic.Config{ MachineName: driver.MachineName(cc, n), StorePath: localpath.MiniPath(), ImageDigest: strings.Split(cc.KicBaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. + Mounts: mounts, CPU: cc.CPUs, Memory: cc.Memory, OCIBinary: oci.Podman, @@ -104,7 +114,7 @@ func status() registry.State { v, err := semver.Make(output) if err != nil { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Cant verify minimum required version for podman . See podman website for installation guide.", Doc: "https://podman.io/getting-started/installation.html"} + return registry.State{Error: err, Installed: true, Running: true, Healthy: false, Fix: "Can't verify minimum required version for podman. 
See podman website for installation guide.", Doc: "https://podman.io/getting-started/installation.html"} } if v.LT(minReqPodmanVer) { @@ -122,7 +132,7 @@ func status() registry.State { // Basic timeout if ctx.Err() == context.DeadlineExceeded { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Restart the Podman service", Doc: docURL} + return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Restart the Podman service", Doc: docURL} } username := "$USER" diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index 4709566f19..6185cf7ee6 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -73,9 +73,10 @@ func status() registry.State { path, err := exec.LookPath(tryPath) if err != nil { return registry.State{ - Error: fmt.Errorf("unable to find VBoxManage in $PATH"), - Fix: "Install VirtualBox", - Doc: docURL, + Error: fmt.Errorf("unable to find VBoxManage in $PATH"), + Fix: "Install VirtualBox", + Installed: false, + Doc: docURL, } } @@ -89,7 +90,7 @@ func status() registry.State { // Basic timeout if ctx.Err() == context.DeadlineExceeded { glog.Warningf("%q timed out. 
", strings.Join(cmd.Args, " ")) - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Restart VirtualBox", Doc: docURL} + return registry.State{Error: err, Installed: true, Running: false, Healthy: false, Fix: "Restart VirtualBox", Doc: docURL} } if exitErr, ok := err.(*exec.ExitError); ok { diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go index 311094bde7..ff0ce69c74 100644 --- a/pkg/minikube/registry/registry.go +++ b/pkg/minikube/registry/registry.go @@ -74,7 +74,8 @@ type StatusChecker func() State type State struct { Installed bool Healthy bool - NeedsImprovement bool // driver is healthy but could be improved + Running bool // it at least appears to be running + NeedsImprovement bool // healthy but could be improved Error error Fix string Doc string diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 732878fc8d..fd60f842c6 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -38,6 +38,7 @@ import ( "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" "k8s.io/minikube/pkg/util/retry" ) @@ -122,7 +123,7 @@ func GetServiceURLs(api libmachine.API, cname string, namespace string, t *templ return serviceURLs, nil } -// GetServiceURLsForService returns a SvcUrl object for a service in a namespace. Supports optional formatting. +// GetServiceURLsForService returns a SvcURL object for a service in a namespace. Supports optional formatting. 
func GetServiceURLsForService(api libmachine.API, cname string, namespace, service string, t *template.Template) (SvcURL, error) { host, err := machine.LoadHost(api, cname) if err != nil { @@ -286,7 +287,7 @@ func WaitForService(api libmachine.API, cname string, namespace string, service } if len(serviceURL.URLs) == 0 { - out.T(out.Sad, "service {{.namespace_name}}/{{.service_name}} has no node port", out.V{"namespace_name": namespace, "service_name": service}) + out.T(style.Sad, "service {{.namespace_name}}/{{.service_name}} has no node port", out.V{"namespace_name": namespace, "service_name": service}) return urlList, nil } diff --git a/pkg/minikube/out/style.go b/pkg/minikube/style/style.go similarity index 53% rename from pkg/minikube/out/style.go rename to pkg/minikube/style/style.go index 0be2f467d4..cc722796b0 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/style/style.go @@ -14,30 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ -package out +package style import ( - "bytes" "strings" - "text/template" - - "github.com/golang/glog" - "k8s.io/minikube/pkg/minikube/translate" ) var ( - // lowBullet is a bullet-point prefix for low-fi mode - lowBullet = "* " - // lowBullet is an indented bullet-point prefix for low-fi mode - lowIndent = " - " - // lowBullet is a warning prefix for low-fi mode - lowWarning = "! " - // lowBullet is an error prefix for low-fi mode - lowError = "X " + // LowBullet is a bullet-point prefix for Low-fi mode + LowBullet = "* " + // LowIndent is an indented bullet-point prefix for Low-fi mode + LowIndent = " - " + // LowWarning is a warning prefix for Low-fi mode + LowWarning = "! " + // LowError is an error prefix for Low-fi mode + LowError = "X " + // Indented is how far to indent unstyled text + Indented = " " ) -// style describes how to stylize a message. -type style struct { +// Options describes how to stylize a message. 
+type Options struct { // Prefix is a string to place in the beginning of a message Prefix string // LowPrefix is the 7-bit compatible prefix we fallback to for less-awesome terminals @@ -46,46 +43,55 @@ type style struct { OmitNewline bool } -// styles is a map of style name to style struct +// Config is a map of style name to style struct // For consistency, ensure that emojis added render with the same width across platforms. -var styles = map[StyleEnum]style{ +var Config = map[Enum]Options{ Celebration: {Prefix: "πŸŽ‰ "}, Check: {Prefix: "βœ… "}, - Command: {Prefix: " β–ͺ ", LowPrefix: lowIndent}, // Indented bullet - Conflict: {Prefix: "πŸ’₯ ", LowPrefix: lowWarning}, + Command: {Prefix: " β–ͺ ", LowPrefix: LowIndent}, // Indented bullet Confused: {Prefix: "πŸ˜• "}, Deleted: {Prefix: "πŸ’€ "}, Documentation: {Prefix: "πŸ“˜ "}, Empty: {Prefix: "", LowPrefix: ""}, - FailureType: {Prefix: "❌ "}, - FatalType: {Prefix: "πŸ’£ ", LowPrefix: lowError}, Happy: {Prefix: "πŸ˜„ "}, - Issue: {Prefix: " β–ͺ ", LowPrefix: lowIndent}, // Indented bullet - Issues: {Prefix: "⁉️ "}, + Issue: {Prefix: " β–ͺ ", LowPrefix: LowIndent}, // Indented bullet + Issues: {Prefix: "🍿 "}, Launch: {Prefix: "πŸš€ "}, LogEntry: {Prefix: " "}, // Indent New: {Prefix: "πŸ†• "}, Notice: {Prefix: "πŸ“Œ "}, - Option: {Prefix: " β–ͺ ", LowPrefix: lowIndent}, // Indented bullet + Option: {Prefix: " β–ͺ ", LowPrefix: LowIndent}, // Indented bullet Pause: {Prefix: "⏸️ "}, Provisioning: {Prefix: "🌱 "}, Ready: {Prefix: "πŸ„ "}, Restarting: {Prefix: "πŸ”„ "}, Running: {Prefix: "πŸƒ "}, - Sad: {Prefix: "😿 "}, - Shrug: {Prefix: "🀷 "}, Sparkle: {Prefix: "✨ "}, Stopped: {Prefix: "πŸ›‘ "}, Stopping: {Prefix: "βœ‹ "}, - SuccessType: {Prefix: "βœ… "}, + Success: {Prefix: "βœ… "}, ThumbsDown: {Prefix: "πŸ‘Ž "}, ThumbsUp: {Prefix: "πŸ‘ "}, Unpause: {Prefix: "⏯️ "}, - URL: {Prefix: "πŸ‘‰ ", LowPrefix: lowIndent}, + URL: {Prefix: "πŸ‘‰ ", LowPrefix: LowIndent}, Usage: {Prefix: "πŸ’‘ "}, Waiting: {Prefix: "βŒ› "}, 
- Warning: {Prefix: "❗ ", LowPrefix: lowWarning}, - Workaround: {Prefix: "πŸ‘‰ ", LowPrefix: lowIndent}, + Unsupported: {Prefix: "🚑 "}, + Workaround: {Prefix: "πŸ‘‰ ", LowPrefix: LowIndent}, + + // Fail emoji's + Conflict: {Prefix: "πŸ’’ ", LowPrefix: LowWarning}, + Failure: {Prefix: "❌ ", LowPrefix: LowError}, + Fatal: {Prefix: "πŸ’£ ", LowPrefix: LowError}, + Warning: {Prefix: "❗ ", LowPrefix: LowWarning}, + KnownIssue: {Prefix: "🧯 ", LowPrefix: LowError}, + UnmetRequirement: {Prefix: "β›” ", LowPrefix: LowError}, + NotAllowed: {Prefix: "🚫 ", LowPrefix: LowError}, + Embarrassed: {Prefix: "🀦 ", LowPrefix: LowWarning}, + Sad: {Prefix: "😿 "}, + Shrug: {Prefix: "🀷 "}, + Improvement: {Prefix: "πŸ’¨ ", LowPrefix: LowWarning}, + SeeNoEvil: {Prefix: "πŸ™ˆ ", LowPrefix: LowError}, // Specialized purpose styles AddonDisable: {Prefix: "πŸŒ‘ "}, @@ -100,7 +106,6 @@ var styles = map[StyleEnum]style{ DeletingHost: {Prefix: "πŸ”₯ "}, Docker: {Prefix: "🐳 "}, DryRun: {Prefix: "🌡 "}, - Embarrassed: {Prefix: "🀦 ", LowPrefix: lowWarning}, Enabling: {Prefix: "πŸ”Œ "}, FileDownload: {Prefix: "πŸ’Ύ "}, Fileserver: {Prefix: "πŸš€ ", OmitNewline: true}, @@ -108,7 +113,7 @@ var styles = map[StyleEnum]style{ Internet: {Prefix: "🌐 "}, ISODownload: {Prefix: "πŸ’Ώ "}, Kubectl: {Prefix: "πŸ’— "}, - Meh: {Prefix: "πŸ™„ ", LowPrefix: lowWarning}, + Meh: {Prefix: "πŸ™„ ", LowPrefix: LowWarning}, Mounting: {Prefix: "πŸ“ "}, MountOptions: {Prefix: "πŸ’Ύ "}, Permissions: {Prefix: "πŸ”‘ "}, @@ -125,69 +130,13 @@ var styles = map[StyleEnum]style{ CNI: {Prefix: "πŸ”— "}, } -// Add a prefix to a string -func applyPrefix(prefix, format string) string { - if prefix == "" { - return format - } - // TODO(tstromberg): Ensure compatibility with RTL languages. 
- return prefix + format -} - -// lowPrefix returns a 7-bit compatible prefix for a style -func lowPrefix(s style) string { +// LowPrefix returns a 7-bit compatible prefix for a style +func LowPrefix(s Options) string { if s.LowPrefix != "" { return s.LowPrefix } if strings.HasPrefix(s.Prefix, " ") { - return lowIndent + return LowIndent } - return lowBullet -} - -// applyStyle translates the given string if necessary then adds any appropriate style prefix. -func applyStyle(style StyleEnum, useColor bool, format string) string { - format = translate.T(format) - - s, ok := styles[style] - if !s.OmitNewline { - format += "\n" - } - - // Similar to CSS styles, if no style matches, output an unformatted string. - if !ok || JSON { - return format - } - - if !useColor { - return applyPrefix(lowPrefix(s), format) - } - return applyPrefix(s.Prefix, format) -} - -// ApplyTemplateFormatting applies formatting to the provided template -func ApplyTemplateFormatting(style StyleEnum, useColor bool, format string, a ...V) string { - if a == nil { - a = []V{{}} - } - format = applyStyle(style, useColor, format) - - var buf bytes.Buffer - t, err := template.New(format).Parse(format) - if err != nil { - glog.Errorf("unable to parse %q: %v - returning raw string.", format, err) - return format - } - err = t.Execute(&buf, a[0]) - if err != nil { - glog.Errorf("unable to execute %s: %v - returning raw string.", format, err) - return format - } - outStyled := buf.String() - - // escape any outstanding '%' signs so that they don't get interpreted - // as a formatting directive down the line - outStyled = strings.Replace(outStyled, "%", "%%", -1) - - return outStyled + return LowBullet } diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/style/style_enum.go similarity index 86% rename from pkg/minikube/out/style_enum.go rename to pkg/minikube/style/style_enum.go index e6604eed9d..6489e1ab3b 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/style/style_enum.go @@ 
-14,14 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package out +package style -// StyleEnum is an enumeration of Style -type StyleEnum int +// Enum is an enumeration of Style +type Enum int // All the Style constants available const ( - AddonDisable StyleEnum = iota + None Enum = iota + AddonDisable AddonEnable Caching Celebrate @@ -43,17 +44,19 @@ const ( Embarrassed Empty Enabling - FailureType - FatalType + Failure + Fatal FileDownload Fileserver Happy HealthCheck + Improvement Internet ISODownload Issue Issues Kubectl + KnownIssue Launch LogEntry Meh @@ -61,6 +64,7 @@ const ( MountOptions New Notice + NotAllowed Option Pause Permissions @@ -72,6 +76,7 @@ const ( Restarting Running Sad + SeeNoEvil Shrug Shutdown Sparkle @@ -79,12 +84,14 @@ const ( StartingVM Stopped Stopping - SuccessType + Success ThumbsDown ThumbsUp Tip Unmount Unpause + UnmetRequirement + Unsupported URL Usage Verifying diff --git a/pkg/minikube/style/style_test.go b/pkg/minikube/style/style_test.go new file mode 100644 index 0000000000..f17d36ec94 --- /dev/null +++ b/pkg/minikube/style/style_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package style + +import "testing" + +func TestLowPrefix(t *testing.T) { + tests := []struct { + expected string + description string + style Options + }{ + { + expected: LowBullet, + description: "empty prefix", + }, + { + expected: "bar", + style: Options{LowPrefix: "bar"}, + description: "lowPrefix", + }, + { + expected: LowBullet, + style: Options{Prefix: "foo"}, + description: "prefix without spaces", + }, + { + expected: LowIndent, + style: Options{Prefix: " foo"}, + description: "prefix with spaces", + }, + } + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + got := LowPrefix(test.style) + if got != test.expected { + t.Errorf("Expected %v but got %v", test.expected, got) + } + }) + } +} diff --git a/pkg/minikube/tunnel/kic/ssh_conn.go b/pkg/minikube/tunnel/kic/ssh_conn.go index 9c0135ea60..caedabadbd 100644 --- a/pkg/minikube/tunnel/kic/ssh_conn.go +++ b/pkg/minikube/tunnel/kic/ssh_conn.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/style" ) type sshConn struct { @@ -68,12 +69,12 @@ func createSSHConn(name, sshPort, sshKey string, svc *v1.Service) *sshConn { if askForSudo { out.T( - out.Warning, + style.Warning, "The service {{.service}} requires privileged ports to be exposed: {{.ports}}", out.V{"service": svc.Name, "ports": fmt.Sprintf("%v", privilegedPorts)}, ) - out.T(out.Permissions, "sudo permission will be asked for it.") + out.T(style.Permissions, "sudo permission will be asked for it.") command = "sudo" sshArgs = append([]string{"ssh"}, sshArgs...) 
@@ -130,7 +131,7 @@ func createSSHConnWithRandomPorts(name, sshPort, sshKey string, svc *v1.Service) } func (c *sshConn) startAndWait() error { - out.T(out.Running, "Starting tunnel for service {{.service}}.", out.V{"service": c.service}) + out.T(style.Running, "Starting tunnel for service {{.service}}.", out.V{"service": c.service}) err := c.cmd.Start() if err != nil { @@ -144,7 +145,7 @@ func (c *sshConn) startAndWait() error { } func (c *sshConn) stop() error { - out.T(out.Stopping, "Stopping tunnel for service {{.service}}.", out.V{"service": c.service}) + out.T(style.Stopping, "Stopping tunnel for service {{.service}}.", out.V{"service": c.service}) return c.cmd.Process.Kill() } diff --git a/pkg/storage/storage_provisioner.go b/pkg/storage/storage_provisioner.go index 5f03066e2c..c0e815b022 100644 --- a/pkg/storage/storage_provisioner.go +++ b/pkg/storage/storage_provisioner.go @@ -56,8 +56,8 @@ var _ controller.Provisioner = &hostPathProvisioner{} // Provision creates a storage asset and returns a PV object representing it. 
func (p *hostPathProvisioner) Provision(options controller.ProvisionOptions) (*core.PersistentVolume, error) { - glog.Infof("Provisioning volume %v", options) - path := path.Join(p.pvDir, options.PVC.Name) + path := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name) + glog.Infof("Provisioning volume %v to %s", options, path) if err := os.MkdirAll(path, 0777); err != nil { return nil, err } diff --git a/pkg/util/lock/lock.go b/pkg/util/lock/lock.go index 8d2e833ab0..e9bdb773c5 100644 --- a/pkg/util/lock/lock.go +++ b/pkg/util/lock/lock.go @@ -40,10 +40,7 @@ func WriteFile(filename string, data []byte, perm os.FileMode) error { defer releaser.Release() - if err = ioutil.WriteFile(filename, data, perm); err != nil { - return errors.Wrapf(err, "writefile failed for %s", filename) - } - return err + return ioutil.WriteFile(filename, data, perm) } // PathMutexSpec returns a mutex spec for a path diff --git a/site/content/en/docs/commands/addons.md b/site/content/en/docs/commands/addons.md index 67828ebb38..b35589cfe4 100644 --- a/site/content/en/docs/commands/addons.md +++ b/site/content/en/docs/commands/addons.md @@ -5,7 +5,6 @@ description: > --- - ## minikube addons Enable or disable a minikube addon diff --git a/site/content/en/docs/commands/cache.md b/site/content/en/docs/commands/cache.md index 12578a8ae6..4d5cc338a6 100644 --- a/site/content/en/docs/commands/cache.md +++ b/site/content/en/docs/commands/cache.md @@ -5,7 +5,6 @@ description: > --- - ## minikube cache Add, delete, or push a local image into minikube diff --git a/site/content/en/docs/commands/completion.md b/site/content/en/docs/commands/completion.md index 37be47a1b4..09f118c9f0 100644 --- a/site/content/en/docs/commands/completion.md +++ b/site/content/en/docs/commands/completion.md @@ -5,7 +5,6 @@ description: > --- - ## minikube completion Generate command completion for a shell diff --git a/site/content/en/docs/commands/config.md b/site/content/en/docs/commands/config.md index 
a77338047e..929398e2c0 100644 --- a/site/content/en/docs/commands/config.md +++ b/site/content/en/docs/commands/config.md @@ -5,7 +5,6 @@ description: > --- - ## minikube config Modify persistent configuration values diff --git a/site/content/en/docs/commands/dashboard.md b/site/content/en/docs/commands/dashboard.md index 2b384d77ce..a7f04178e3 100644 --- a/site/content/en/docs/commands/dashboard.md +++ b/site/content/en/docs/commands/dashboard.md @@ -5,7 +5,6 @@ description: > --- - ## minikube dashboard Access the Kubernetes dashboard running within the minikube cluster diff --git a/site/content/en/docs/commands/delete.md b/site/content/en/docs/commands/delete.md index a9b1255388..24bd1047fb 100644 --- a/site/content/en/docs/commands/delete.md +++ b/site/content/en/docs/commands/delete.md @@ -5,7 +5,6 @@ description: > --- - ## minikube delete Deletes a local Kubernetes cluster diff --git a/site/content/en/docs/commands/docker-env.md b/site/content/en/docs/commands/docker-env.md index e2a886c73f..b08c05054a 100644 --- a/site/content/en/docs/commands/docker-env.md +++ b/site/content/en/docs/commands/docker-env.md @@ -5,7 +5,6 @@ description: > --- - ## minikube docker-env Configure environment to use minikube's Docker daemon diff --git a/site/content/en/docs/commands/help.md b/site/content/en/docs/commands/help.md index d737c6bdb7..26c6768c76 100644 --- a/site/content/en/docs/commands/help.md +++ b/site/content/en/docs/commands/help.md @@ -5,7 +5,6 @@ description: > --- - ## minikube help Help about any command diff --git a/site/content/en/docs/commands/ip.md b/site/content/en/docs/commands/ip.md index 0556157bc9..b9408e3f53 100644 --- a/site/content/en/docs/commands/ip.md +++ b/site/content/en/docs/commands/ip.md @@ -5,7 +5,6 @@ description: > --- - ## minikube ip Retrieves the IP address of the running cluster diff --git a/site/content/en/docs/commands/kubectl.md b/site/content/en/docs/commands/kubectl.md index db94331beb..e9ed8281da 100644 --- 
a/site/content/en/docs/commands/kubectl.md +++ b/site/content/en/docs/commands/kubectl.md @@ -5,7 +5,6 @@ description: > --- - ## minikube kubectl Run a kubectl binary matching the cluster version diff --git a/site/content/en/docs/commands/logs.md b/site/content/en/docs/commands/logs.md index 5142cdaf21..ac90d37e85 100644 --- a/site/content/en/docs/commands/logs.md +++ b/site/content/en/docs/commands/logs.md @@ -5,7 +5,6 @@ description: > --- - ## minikube logs Returns logs to debug a local Kubernetes cluster diff --git a/site/content/en/docs/commands/mount.md b/site/content/en/docs/commands/mount.md index cbd2a4fd58..c9afe4ace7 100644 --- a/site/content/en/docs/commands/mount.md +++ b/site/content/en/docs/commands/mount.md @@ -5,7 +5,6 @@ description: > --- - ## minikube mount Mounts the specified directory into minikube diff --git a/site/content/en/docs/commands/node.md b/site/content/en/docs/commands/node.md index 034af6360d..031f3c732c 100644 --- a/site/content/en/docs/commands/node.md +++ b/site/content/en/docs/commands/node.md @@ -5,7 +5,6 @@ description: > --- - ## minikube node Add, remove, or list additional nodes diff --git a/site/content/en/docs/commands/pause.md b/site/content/en/docs/commands/pause.md index c27427e450..10c7102182 100644 --- a/site/content/en/docs/commands/pause.md +++ b/site/content/en/docs/commands/pause.md @@ -5,7 +5,6 @@ description: > --- - ## minikube pause pause Kubernetes diff --git a/site/content/en/docs/commands/podman-env.md b/site/content/en/docs/commands/podman-env.md index 00c0e7d954..a15a0b22ad 100644 --- a/site/content/en/docs/commands/podman-env.md +++ b/site/content/en/docs/commands/podman-env.md @@ -5,7 +5,6 @@ description: > --- - ## minikube podman-env Configure environment to use minikube's Podman service diff --git a/site/content/en/docs/commands/profile.md b/site/content/en/docs/commands/profile.md index 344c63360a..15621db2cb 100644 --- a/site/content/en/docs/commands/profile.md +++ 
b/site/content/en/docs/commands/profile.md @@ -5,7 +5,6 @@ description: > --- - ## minikube profile Get or list the current profiles (clusters) diff --git a/site/content/en/docs/commands/service.md b/site/content/en/docs/commands/service.md index e5fb1be2f9..d0a356428f 100644 --- a/site/content/en/docs/commands/service.md +++ b/site/content/en/docs/commands/service.md @@ -5,7 +5,6 @@ description: > --- - ## minikube service Returns a URL to connect to a service diff --git a/site/content/en/docs/commands/ssh-key.md b/site/content/en/docs/commands/ssh-key.md index 6f5f489e0e..19ed9300cd 100644 --- a/site/content/en/docs/commands/ssh-key.md +++ b/site/content/en/docs/commands/ssh-key.md @@ -5,7 +5,6 @@ description: > --- - ## minikube ssh-key Retrieve the ssh identity key path of the specified cluster diff --git a/site/content/en/docs/commands/ssh.md b/site/content/en/docs/commands/ssh.md index 921bfa2a0a..1c97d108c4 100644 --- a/site/content/en/docs/commands/ssh.md +++ b/site/content/en/docs/commands/ssh.md @@ -5,7 +5,6 @@ description: > --- - ## minikube ssh Log into the minikube environment (for debugging) diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 9b9754d0d4..2ae0296d26 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -5,7 +5,6 @@ description: > --- - ## minikube start Starts a local Kubernetes cluster @@ -27,7 +26,7 @@ minikube start [flags] --apiserver-names stringArray A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine --apiserver-port int The apiserver listening port (default 8443) --auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true) - --base-image string The base image to use for docker/podman drivers. Intended for local development. 
(default "gcr.io/k8s-minikube/kicbase:v0.0.12-snapshot@sha256:7be40a42fdfec56fbf7bc9de07ea2ed4a931cbb70dccb8612b2ba13763bf4568") + --base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.12-snapshot3@sha256:1d687ba53e19dbe5fafe4cc18aa07f269ecc4b7b622f2251b5bf569ddb474e9b") --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) --cni string CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto) --container-runtime string The container runtime to be used (docker, cri-o, containerd). (default "docker") @@ -68,7 +67,7 @@ minikube start [flags] --interactive Allow user prompts for more information (default true) --iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.12.2.iso,https://github.com/kubernetes/minikube/releases/download/v1.12.2/minikube-v1.12.2.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.12.2.iso]) --keep-context This will keep the existing kubectl context and will create a minikube context. - --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.18.3, 'latest' for v1.19.0-rc.4). Defaults to 'stable'. + --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.19.0, 'latest' for v1.19.0). Defaults to 'stable'. --kvm-gpu Enable experimental NVIDIA GPU support in minikube --kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only) --kvm-network string The KVM network name. 
(kvm2 driver only) (default "default") @@ -83,7 +82,7 @@ minikube start [flags] --nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares") --no-vtx-check Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only) -n, --nodes int The number of nodes to spin up. Defaults to 1. (default 1) - --output string Format to print stdout in. Options include: [text,json] (default "text") + -o, --output string Format to print stdout in. Options include: [text,json] (default "text") --preload If set, download tarball of preloaded images if available to improve start time. Defaults to true. (default true) --registry-mirror strings Registry mirrors to pass to the Docker daemon --service-cluster-ip-range string The CIDR to be used for service cluster IPs. (default "10.96.0.0/12") diff --git a/site/content/en/docs/commands/status.md b/site/content/en/docs/commands/status.md index 90964a1437..a63f6a0875 100644 --- a/site/content/en/docs/commands/status.md +++ b/site/content/en/docs/commands/status.md @@ -5,7 +5,6 @@ description: > --- - ## minikube status Gets the status of a local Kubernetes cluster diff --git a/site/content/en/docs/commands/stop.md b/site/content/en/docs/commands/stop.md index 16046b8373..f10b9f6f7e 100644 --- a/site/content/en/docs/commands/stop.md +++ b/site/content/en/docs/commands/stop.md @@ -5,7 +5,6 @@ description: > --- - ## minikube stop Stops a running local Kubernetes cluster diff --git a/site/content/en/docs/commands/tunnel.md b/site/content/en/docs/commands/tunnel.md index 81276e192a..bfb8d6ddff 100644 --- a/site/content/en/docs/commands/tunnel.md +++ b/site/content/en/docs/commands/tunnel.md @@ -5,7 +5,6 @@ description: > --- - ## minikube tunnel Connect to LoadBalancer services diff --git a/site/content/en/docs/commands/unpause.md b/site/content/en/docs/commands/unpause.md index 3ada57bfce..e97b7e9988 100644 --- 
a/site/content/en/docs/commands/unpause.md +++ b/site/content/en/docs/commands/unpause.md @@ -5,7 +5,6 @@ description: > --- - ## minikube unpause unpause Kubernetes diff --git a/site/content/en/docs/commands/update-check.md b/site/content/en/docs/commands/update-check.md index d479854faa..6fd0a8e818 100644 --- a/site/content/en/docs/commands/update-check.md +++ b/site/content/en/docs/commands/update-check.md @@ -5,7 +5,6 @@ description: > --- - ## minikube update-check Print current and latest version number diff --git a/site/content/en/docs/commands/update-context.md b/site/content/en/docs/commands/update-context.md index cefadf5dd4..4949b66d59 100644 --- a/site/content/en/docs/commands/update-context.md +++ b/site/content/en/docs/commands/update-context.md @@ -5,7 +5,6 @@ description: > --- - ## minikube update-context Update kubeconfig in case of an IP or port change diff --git a/site/content/en/docs/commands/version.md b/site/content/en/docs/commands/version.md index 39fc166ea8..f66f9329b5 100644 --- a/site/content/en/docs/commands/version.md +++ b/site/content/en/docs/commands/version.md @@ -5,7 +5,6 @@ description: > --- - ## minikube version Print the version of minikube diff --git a/site/content/en/docs/contrib/guide.en.md b/site/content/en/docs/contrib/guide.en.md index b546dce37a..3b54f44aea 100644 --- a/site/content/en/docs/contrib/guide.en.md +++ b/site/content/en/docs/contrib/guide.en.md @@ -35,6 +35,7 @@ Once you've discovered an issue to work on: 2. A reviewer will respond to your issue promptly. 3. If your proposed change is accepted, and you haven't already done so, sign the [Contributor License Agreement (CLA)](https://git.k8s.io/community/CLA.md) 4. Fork the minikube repository, develop and test your code changes. + * Before test, you may need to install some [prerequisites](https://minikube.sigs.k8s.io/docs/contrib/testing/#prerequisites). 5. Submit a pull request. 
## Contributing larger changes diff --git a/site/content/en/docs/contrib/testing.en.md b/site/content/en/docs/contrib/testing.en.md index 2f88cf99d9..bebabf987a 100644 --- a/site/content/en/docs/contrib/testing.en.md +++ b/site/content/en/docs/contrib/testing.en.md @@ -6,7 +6,25 @@ description: > How to run tests --- -### Unit Tests +## Prerequisites + +* Go distribution + * Specific version depends on minikube version. + * The current dependency version can be found here : [master branch's go.mod file](https://github.com/kubernetes/minikube/blob/master/go.mod). +* If you are on Linux, you will need to install `libvirt-dev`, since unit tests need kvm2 driver: + +```shell +# For Debian based +sudo apt-get install libvirt-dev + +# For Centos +yum install libvirt-devel + +# For Fedora +dnf install libvirt-devel +``` + +## Unit Tests Unit tests are run on Travis before code is merged. To run as part of a development cycle: @@ -14,9 +32,9 @@ Unit tests are run on Travis before code is merged. To run as part of a developm make test ``` -### Integration Tests +## Integration Tests -#### The basics +### The basics From the minikube root directory, build the binary and run the tests: @@ -30,7 +48,7 @@ You may find it useful to set various options to test only a particular test aga env TEST_ARGS="-minikube-start-args=--driver=hyperkit -test.run TestStartStop" make integration ``` -#### Quickly iterating on a single test +### Quickly iterating on a single test Run a single test on an active cluster: @@ -44,13 +62,13 @@ The `--cleanup=false` test arg ensures that the cluster will not be deleted afte See [main.go](https://github.com/kubernetes/minikube/blob/master/test/integration/main.go) for details. 
-#### Disabling parallelism +### Disabling parallelism ```shell make integration -e TEST_ARGS="-test.parallel=1" ``` -#### Testing philosophy +### Testing philosophy - Tests should be so simple as to be correct by inspection - Readers should need to read only the test body to understand the test @@ -58,7 +76,7 @@ make integration -e TEST_ARGS="-test.parallel=1" Tests are typically read with a great air of skepticism, because chances are they are being read only when things are broken. -### Conformance Tests +## Conformance Tests These are Kubernetes tests that run against an arbitrary cluster and exercise a wide range of Kubernetes features. You can run these against minikube by following these steps: diff --git a/site/content/en/docs/drivers/includes/none_usage.inc b/site/content/en/docs/drivers/includes/none_usage.inc index bf36312abf..e1664e6199 100644 --- a/site/content/en/docs/drivers/includes/none_usage.inc +++ b/site/content/en/docs/drivers/includes/none_usage.inc @@ -2,16 +2,18 @@ A Linux VM with the following: -* Docker -* systemd (OpenRC based systems are also supported in v1.10+) +* systemd or OpenRC +* a container runtime, such as Docker or CRIO -This VM must also meet the [kubeadm requirements.](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) +This VM must also meet the [kubeadm requirements](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/), such as: -Kubernetes v1.18+ needs conntract to be installed on debian based machines. 
- -```shell - sudo apt-get install conntrack -y -``` +* 2 CPU's +* 2GB RAM +* iptables (in legacy mode) +* conntrack +* crictl +* SELinux permissive +* cgroups v1 (v2 is not yet supported by Kubernetes) ## Usage diff --git a/site/content/en/docs/faq/_index.md b/site/content/en/docs/faq/_index.md index 0eb2a1b288..0539c92f28 100644 --- a/site/content/en/docs/faq/_index.md +++ b/site/content/en/docs/faq/_index.md @@ -29,3 +29,8 @@ minikube's bootstrapper, [Kubeadm](https://github.com/kubernetes/kubeadm) verifi Please allocate sufficient resources for Knative setup using minikube, especially when you run a minikube cluster on your local machine. We recommend allocating at least 6 CPUs and 8G memory. `minikube start --cpus 6 --memory 8000` + +## Do I need to install kubectl locally? + +No, minikube comes with built-in kubectl [see minikube's kubectl documentation]({{< ref "docs/handbook/kubectl.md" >}}). + diff --git a/site/content/en/docs/handbook/accessing.md b/site/content/en/docs/handbook/accessing.md index 2d7ceb73e9..6289cc8588 100644 --- a/site/content/en/docs/handbook/accessing.md +++ b/site/content/en/docs/handbook/accessing.md @@ -123,7 +123,7 @@ Each service will get its own external ip. If you are on macOS, the tunnel command also allows DNS resolution for Kubernetes services from the host. -NOTE: docker driver doesn't suport DNS resolution +NOTE: docker driver doesn't support DNS resolution ### Cleaning up orphaned routes diff --git a/site/content/en/docs/handbook/kubectl.md b/site/content/en/docs/handbook/kubectl.md index 5f21ff4f20..21532287d3 100644 --- a/site/content/en/docs/handbook/kubectl.md +++ b/site/content/en/docs/handbook/kubectl.md @@ -17,6 +17,10 @@ as well. You can also `alias kubectl="minikube kubectl --"` for easier usage. +Alternatively, you can create a symbolic link to minikube's binary named 'kubectl'. 
+ +`ln -s $(which minikube) /usr/local/bin/kubectl` + Get pods `minikube kubectl -- get pods` diff --git a/site/content/en/docs/tutorials/setup_minikube_in_github_actions.md b/site/content/en/docs/tutorials/setup_minikube_in_github_actions.md index 9b1fd6fbd4..1b4031425f 100644 --- a/site/content/en/docs/tutorials/setup_minikube_in_github_actions.md +++ b/site/content/en/docs/tutorials/setup_minikube_in_github_actions.md @@ -37,7 +37,7 @@ Create workflow: jobs: job1: runs-on: ubuntu-latest - name: build example and deploy to minikbue + name: build example and deploy to minikube steps: - uses: actions/checkout@v2 - name: Start minikube diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 4c4c88b7d4..173be340ee 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -40,6 +40,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/util/retry" "github.com/elazarl/goproxy" @@ -86,6 +87,7 @@ func TestFunctional(t *testing.T) { {"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up {"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy {"MinikubeKubectlCmd", validateMinikubeKubectl}, // Make sure `minikube kubectl` works + {"MinikubeKubectlCmdDirectly", validateMinikubeKubectlDirectCall}, } for _, tc := range tests { tc := tc @@ -314,6 +316,26 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) } } +// validateMinikubeKubectlDirectCall validates that calling minikube's kubectl +func validateMinikubeKubectlDirectCall(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + dir := filepath.Dir(Target()) + dstfn := filepath.Join(dir, "kubectl") + err := os.Link(Target(), dstfn) + + if err != nil { + t.Fatal(err) + } + defer os.Remove(dstfn) // clean up + + kubectlArgs := 
[]string{"get", "pods"} + rr, err := Run(t, exec.CommandContext(ctx, dstfn, kubectlArgs...)) + if err != nil { + t.Fatalf("failed to run kubectl directl. args %q: %v", rr.Command(), err) + } + +} + // validateComponentHealth asserts that all Kubernetes components are healthy func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { defer PostMortemLogs(t, profile) @@ -450,7 +472,7 @@ func validateDryRun(ctx context.Context, t *testing.T, profile string) { c := exec.CommandContext(mctx, Target(), startArgs...) rr, err := Run(t, c) - wantCode := 78 // exit.Config + wantCode := reason.ExInsufficientMemory if rr.ExitCode != wantCode { t.Errorf("dry-run(250MB) exit code = %d, wanted = %d: %v", rr.ExitCode, wantCode, err) } diff --git a/test/integration/json_output_test.go b/test/integration/json_output_test.go index 9ec20b8aa9..b5085dffd7 100644 --- a/test/integration/json_output_test.go +++ b/test/integration/json_output_test.go @@ -27,8 +27,8 @@ import ( "testing" cloudevents "github.com/cloudevents/sdk-go/v2" - "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out/register" + "k8s.io/minikube/pkg/minikube/reason" ) func TestJSONOutput(t *testing.T) { @@ -122,8 +122,8 @@ func TestJSONOutputError(t *testing.T) { if last.Type() != register.NewError("").Type() { t.Fatalf("last cloud event is not of type error: %v", last) } - last.validateData(t, "exitcode", fmt.Sprintf("%v", exit.Unavailable)) - last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s\n", runtime.GOOS)) + last.validateData(t, "exitcode", fmt.Sprintf("%v", reason.ExDriverUnsupported)) + last.validateData(t, "message", fmt.Sprintf("The driver 'fail' is not supported on %s", runtime.GOOS)) } type cloudEvent struct { @@ -149,7 +149,7 @@ func (c *cloudEvent) validateData(t *testing.T, key, value string) { t.Fatalf("expected key %s does not exist in cloud event", key) } if v != value { - t.Fatalf("values in cloud events do not 
match:\nActual:\n%v\nExpected:\n%v\n", v, value) + t.Fatalf("values in cloud events do not match:\nActual:\n'%v'\nExpected:\n'%v'\n", v, value) } } diff --git a/test/integration/skaffold_test.go b/test/integration/skaffold_test.go index ad02ecaac9..48f9b42ed4 100644 --- a/test/integration/skaffold_test.go +++ b/test/integration/skaffold_test.go @@ -29,8 +29,8 @@ import ( "testing" "time" - "github.com/docker/machine/libmachine/mcnutils" "github.com/hashicorp/go-getter" + "github.com/otiai10/copy" "k8s.io/minikube/pkg/util/retry" ) @@ -38,9 +38,8 @@ func TestSkaffold(t *testing.T) { if NoneDriver() { t.Skip("none driver doesn't support `minikube docker-env`; skaffold depends on this command") } - // can't use a unique profile, as skaffold only recognizes the - // profile name 'minikube' as a local cluster - profile := "minikube" + + profile := UniqueProfileName("skaffold") ctx, cancel := context.WithTimeout(context.Background(), Minutes(5)) defer CleanupWithLogs(t, profile, cancel) @@ -51,32 +50,49 @@ func TestSkaffold(t *testing.T) { } defer os.Remove(tf.Name()) - // start minikube cluster - args := append([]string{"start", "-p", profile, "--memory=2200"}, StartArgs()...) - rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), "version")) + if err != nil { + t.Fatalf("error running skaffold version: %v\n%s", err, rr.Output()) + } + t.Logf("skaffold version: %s", rr.Stdout.Bytes()) + + args := append([]string{"start", "-p", profile, "--memory=2600"}, StartArgs()...) 
+ rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("starting minikube: %v\n%s", err, rr.Output()) } // make sure minikube binary is in path so that skaffold can access it abs, err := filepath.Abs(Target()) - // copy minikube binary to minikube - if err := mcnutils.CopyFile(Target(), filepath.Join(filepath.Dir(abs), "minikube")); err != nil { - t.Fatalf("error copying to minikube") - } if err != nil { - t.Fatalf("absolute path to minikube binary: %v", err) + t.Fatalf("unable to determine abs path: %v", err) } - os.Setenv("PATH", fmt.Sprintf("%s:%s", filepath.Dir(abs), os.Getenv("PATH"))) - // make sure 'docker' and 'minikube' are on PATH - for _, binary := range []string{"minikube", "docker"} { - rr, err := Run(t, exec.CommandContext(ctx, "which", binary)) - if err != nil { - t.Fatalf("'which %v' failed: check if %v is on PATH\n%v", binary, binary, rr.Output()) + + if filepath.Base(Target()) != "minikube" { + new := filepath.Join(filepath.Dir(abs), "minikube") + t.Logf("copying %s to %s", Target(), new) + if err := copy.Copy(Target(), new); err != nil { + t.Fatalf("error copying to minikube") } } + + oldPath := os.Getenv("PATH") + os.Setenv("PATH", fmt.Sprintf("%s:%s", filepath.Dir(abs), os.Getenv("PATH"))) + + // make sure 'docker' and 'minikube' are now in PATH + for _, binary := range []string{"minikube", "docker"} { + _, err := exec.LookPath(binary) + if err != nil { + t.Fatalf("%q is not in path", binary) + } + } + + defer func() { + os.Setenv("PATH", oldPath) + }() + // make sure "skaffold run" exits without failure - cmd := exec.CommandContext(ctx, tf.Name(), "run", "--kube-context", profile, "--status-check=true", "--port-forward=false") + cmd := exec.CommandContext(ctx, tf.Name(), "run", "--minikube-profile", profile, "--kube-context", profile, "--status-check=true", "--port-forward=false") cmd.Dir = "testdata/skaffold" rr, err = Run(t, cmd) if err != nil {