Merge pull request #4959 from tstromberg/distro

UI: Add profile name & distro version to intro, clarify other messages
pull/4960/head^2
Thomas Strömberg 2019-08-02 15:03:08 -07:00 committed by GitHub
commit aabe45b184
4 changed files with 44 additions and 17 deletions

View File

@@ -41,6 +41,7 @@ import (
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1/remote"
+	gopshost "github.com/shirou/gopsutil/host"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"golang.org/x/sync/errgroup"
@@ -206,9 +207,41 @@ var startCmd = &cobra.Command{
 	Run: runStart,
 }
 
+// platform generates a user-readable platform message
+func platform() string {
+	var s strings.Builder
+
+	// Show the distro version if possible
+	hi, err := gopshost.Info()
+	if err == nil {
+		s.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
+		glog.Infof("hostinfo: %+v", hi)
+	} else {
+		glog.Errorf("gopshost.Info returned error: %v", err)
+		s.WriteString(runtime.GOOS)
+	}
+
+	vsys, vrole, err := gopshost.Virtualization()
+	if err != nil {
+		glog.Errorf("gopshost.Virtualization returned error: %v", err)
+	} else {
+		glog.Infof("virtualization: %s %s", vsys, vrole)
+	}
+
+	// This environment is exotic, let's output a bit more.
+	if vrole == "guest" || runtime.GOARCH != "amd64" {
+		s.WriteString(fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH))
+	}
+	return s.String()
+}
+
 // runStart handles the executes the flow of "minikube start"
 func runStart(cmd *cobra.Command, args []string) {
-	out.T(out.Happy, "minikube {{.version}} on {{.os}} ({{.arch}})", out.V{"version": version.GetVersion(), "os": runtime.GOOS, "arch": runtime.GOARCH})
+	prefix := ""
+	if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
+		prefix = fmt.Sprintf("[%s] ", viper.GetString(cfg.MachineProfile))
+	}
+	out.T(out.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version.GetVersion(), "platform": platform()})
 
 	vmDriver := viper.GetString(vmDriver)
 	if err := cmdcfg.IsValidDriver(runtime.GOOS, vmDriver); err != nil {
@@ -342,7 +375,7 @@ func skipCache(config *cfg.Config) {
 func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
 	version, _ := cr.Version()
-	out.T(cr.Style(), "Configuring environment for Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}}", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version})
+	out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version})
 	for _, v := range dockerOpt {
 		out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v})
 	}
@@ -355,11 +388,7 @@ func showKubectlConnectInfo(kubeconfig *pkgutil.KubeConfigSetup) {
 	if kubeconfig.KeepContext {
 		out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kubeconfig.ClusterName})
 	} else {
-		if !viper.GetBool(waitUntilHealthy) {
-			out.T(out.Ready, "kubectl has been configured configured to use {{.name}}", out.V{"name": cfg.GetMachineName()})
-		} else {
-			out.T(out.Ready, "Done! kubectl is now configured to use {{.name}}", out.V{"name": cfg.GetMachineName()})
-		}
+		out.T(out.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": cfg.GetMachineName()})
 	}
 	_, err := exec.LookPath("kubectl")
 	if err != nil {
@@ -742,7 +771,7 @@ func validateKubernetesVersions(old *cfg.Config) (string, bool) {
 		return nv, isUpgrade
 	}
 	if nvs.GT(ovs) {
-		out.T(out.ThumbsUp, "minikube will upgrade the local cluster from Kubernetes {{.old}} to {{.new}}", out.V{"old": ovs, "new": nvs})
+		out.T(out.ThumbsUp, "Upgrading from Kubernetes {{.old}} to {{.new}}", out.V{"old": ovs, "new": nvs})
 		isUpgrade = true
 	}
 	return nv, isUpgrade
@@ -828,7 +857,7 @@ func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner c
 	}
 	if preexisting {
-		out.T(out.Restarting, "Relaunching Kubernetes {{.version}} using {{.bootstrapper}} ... ", out.V{"version": kc.KubernetesVersion, "bootstrapper": bsName})
+		out.T(out.Restarting, "Relaunching Kubernetes using {{.bootstrapper}} ... ", out.V{"bootstrapper": bsName})
 		if err := bs.RestartCluster(kc); err != nil {
 			exit.WithLogEntries("Error restarting cluster", err, logs.FindProblems(r, bs, runner))
 		}

View File

@@ -305,7 +305,7 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig) error {
 	// by a CNI plugin which is usually started after minikube has been brought
 	// up. Otherwise, minikube won't start, as "k8s-app" pods are not ready.
 	componentsOnly := k8s.NetworkPlugin == "cni"
-	out.T(out.WaitingPods, "Verifying:")
+	out.T(out.WaitingPods, "Waiting for:")
 	client, err := util.GetClient()
 	if err != nil {
 		return errors.Wrap(err, "k8s client")

View File

@@ -120,9 +120,9 @@ func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error)
 	}
 	if s == state.Running {
-		out.T(out.Running, `Re-using the currently running {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": h.Driver.DriverName(), "profile_name": cfg.GetMachineName()})
+		out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" VM ...`, out.V{"driver_name": h.Driver.DriverName(), "profile_name": cfg.GetMachineName()})
 	} else {
-		out.T(out.Restarting, `Restarting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": h.Driver.DriverName(), "profile_name": cfg.GetMachineName()})
+		out.T(out.Restarting, `Starting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": h.Driver.DriverName(), "profile_name": cfg.GetMachineName()})
 		if err := h.Driver.Start(); err != nil {
 			return nil, errors.Wrap(err, "start")
 		}
@@ -134,6 +134,7 @@ func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error)
 	e := engineOptions(config)
 	glog.Infof("engine options: %+v", e)
 
+	out.T(out.Waiting, "Waiting for the host to be provisioned ...")
 	err = configureHost(h, e)
 	if err != nil {
 		return nil, err
@@ -152,9 +153,6 @@ func localDriver(name string) bool {
 // configureHost handles any post-powerup configuration required
 func configureHost(h *host.Host, e *engine.Options) error {
 	glog.Infof("configureHost: %T %+v", h, h)
-
-	// Slightly counter-intuitive, but this is what DetectProvisioner & ConfigureAuth block on.
-	out.T(out.Waiting, "Waiting for SSH access ...", out.V{})
 	if len(e.Env) > 0 {
 		h.HostOptions.EngineOptions.Env = e.Env
 		glog.Infof("Detecting provisioner ...")
@@ -409,7 +407,7 @@ func showRemoteOsRelease(driver drivers.Driver) {
 		return
 	}
-	out.T(out.Provisioner, "Provisioned with {{.pretty_name}}", out.V{"pretty_name": osReleaseInfo.PrettyName})
+	glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName)
 }
 
 func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) {

View File

@@ -115,7 +115,7 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run
 	cmds := logCommands(r, bs, lines, false)
 
 	// These are not technically logs, but are useful to have in bug reports.
-	cmds["kernel"] = "uptime && uname -a"
+	cmds["kernel"] = "uptime && uname -a && grep PRETTY /etc/os-release"
 
 	names := []string{}
 	for k := range cmds {
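
With the extra `grep PRETTY /etc/os-release`, the "kernel" section of `minikube logs` now includes the guest distro name alongside uptime and kernel details. As a rough illustration of what that command collects (minikube actually runs this string through its own command runner inside the VM, not via os/exec on the host), a hedged standalone sketch:

// logsketch.go - illustrative only; runs the same command string locally.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same command string that logs.go now registers under cmds["kernel"].
	cmd := "uptime && uname -a && grep PRETTY /etc/os-release"
	out, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
	if err != nil {
		fmt.Println("kernel info collection failed:", err)
	}
	fmt.Print(string(out))
}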