Merge pull request #7389 from sharifelgamal/driver-fallback

fallback to alternate drivers on failure
Sharif Elgamal 2020-04-09 12:59:01 -07:00 committed by GitHub
commit 527bbcd4e6
8 changed files with 281 additions and 137 deletions

cmd/minikube/cmd/node_add.go

@@ -20,6 +20,7 @@ import (
 	"github.com/spf13/cobra"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
+	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/mustload"
 	"k8s.io/minikube/pkg/minikube/node"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -54,7 +55,10 @@ var nodeAddCmd = &cobra.Command{
 		}

 		if err := node.Add(cc, n); err != nil {
-			maybeDeleteAndRetry(*cc, n, nil, err)
+			_, err := maybeDeleteAndRetry(*cc, n, nil, err)
+			if err != nil {
+				exit.WithError("failed to add node", err)
+			}
 		}

 		out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
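The theme of this change: helpers like maybeDeleteAndRetry stop exiting the process themselves and instead return an error, so only the cobra command layer decides when to die. A minimal self-contained sketch of that inversion (names are illustrative stand-ins, not minikube's):

package main

import (
	"errors"
	"fmt"
	"os"
)

// retryNode stands in for maybeDeleteAndRetry: it reports failure
// to its caller rather than terminating the process itself.
func retryNode() error {
	return errors.New("node failed to come up")
}

func main() {
	// Only the top-level command exits, mirroring
	// exit.WithError("failed to add node", err) above.
	if err := retryNode(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to add node: %v\n", err)
		os.Exit(70) // EX_SOFTWARE from sysexits(3), which exit.Software follows
	}
}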

cmd/minikube/cmd/node_start.go

@@ -49,9 +49,27 @@ var nodeStartCmd = &cobra.Command{
 			exit.WithError("retrieving node", err)
 		}

-		_, err = node.Start(*cc, *n, nil, false)
+		r, p, m, h, err := node.Provision(cc, n, false)
 		if err != nil {
-			maybeDeleteAndRetry(*cc, *n, nil, err)
+			exit.WithError("provisioning host for node", err)
+		}
+
+		s := node.Starter{
+			Runner:         r,
+			PreExists:      p,
+			MachineAPI:     m,
+			Host:           h,
+			Cfg:            cc,
+			Node:           n,
+			ExistingAddons: nil,
+		}
+
+		_, err = node.Start(s, false)
+		if err != nil {
+			_, err := maybeDeleteAndRetry(*cc, *n, nil, err)
+			if err != nil {
+				exit.WithError("failed to start node", err)
+			}
 		}
 	},
 }
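node.Start has been split in two: node.Provision boots the machine or container and warms the caches, and node.Start configures the runtime and Kubernetes, with node.Starter carrying state between the phases. Inside the minikube tree the sequence looks like this (a sketch of the same steps node.Add performs in pkg/minikube/node below, error handling abbreviated):

// Phase 1: create/boot the machine and kick off downloads.
r, p, m, h, err := node.Provision(cc, n, false)
if err != nil {
	return err
}

// Phase 2: hand everything Provision produced to Start.
s := node.Starter{
	Runner:         r, // command.Runner for the guest
	PreExists:      p, // whether the host already existed
	MachineAPI:     m, // libmachine.API handle
	Host:           h, // *host.Host
	Cfg:            cc,
	Node:           n,
	ExistingAddons: nil,
}
_, err = node.Start(s, false)
return err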

cmd/minikube/cmd/start.go

@@ -150,11 +150,59 @@ func runStart(cmd *cobra.Command, args []string) {
 	}
 	validateSpecifiedDriver(existing)
-	ds := selectDriver(existing)
+	ds, alts, specified := selectDriver(existing)
+	starter, err := provisionWithDriver(cmd, ds, existing)
+	if err != nil {
+		if specified {
+			// If the user specified a driver, don't fallback to anything else
+			exit.WithError("error provisioning host", err)
+		} else {
+			success := false
+			// Walk down the rest of the options
+			for _, alt := range alts {
+				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
+				ds = alt
+				// Delete the existing cluster and try again with the next driver on the list
+				profile, err := config.LoadProfile(ClusterFlagValue())
+				if err != nil {
+					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
+				}
+
+				err = deleteProfile(profile)
+				if err != nil {
+					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
+				}
+
+				starter, err = provisionWithDriver(cmd, ds, existing)
+				if err != nil {
+					continue
+				} else {
+					// Success!
+					success = true
+					break
+				}
+			}
+			if !success {
+				exit.WithError("error provisioning host", err)
+			}
+		}
+	}
+
+	kubeconfig, err := startWithDriver(starter, existing)
+	if err != nil {
+		exit.WithError("failed to start node", err)
+	}
+
+	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
+		glog.Errorf("kubectl info: %v", err)
+	}
+}
+
+func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
 	driverName := ds.Name
 	glog.Infof("selected driver: %s", driverName)
 	validateDriver(ds, existing)
-	err = autoSetDriverOptions(cmd, driverName)
+	err := autoSetDriverOptions(cmd, driverName)
 	if err != nil {
 		glog.Errorf("Error autoSetOptions : %v", err)
 	}
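The fallback itself is a plain walk over the remaining candidates: warn, tear down whatever the failed attempt left behind, and try the next driver, keeping the last error for the final report. A self-contained sketch of the control flow (driverState, provision, and cleanup are stand-ins for registry.DriverState, provisionWithDriver, and deleteProfile):

package main

import (
	"errors"
	"fmt"
)

type driverState struct{ Name string }

// provision stands in for provisionWithDriver.
func provision(d driverState) error {
	if d.Name != "docker" {
		return errors.New("hypervisor not available")
	}
	return nil
}

// cleanup stands in for deleteProfile: a failed attempt may have
// left a half-created cluster behind.
func cleanup() { fmt.Println("deleting partial cluster") }

func main() {
	pick := driverState{Name: "hyperkit"}
	alts := []driverState{{Name: "virtualbox"}, {Name: "docker"}}

	err := provision(pick)
	for _, alt := range alts {
		if err == nil {
			break
		}
		fmt.Printf("startup with %s failed (%v), trying %s\n", pick.Name, err, alt.Name)
		cleanup()
		pick = alt
		err = provision(pick)
	}
	if err != nil {
		fmt.Println("error provisioning host:", err)
	}
}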
@@ -170,19 +218,19 @@ func runStart(cmd *cobra.Command, args []string) {
 	k8sVersion := getKubernetesVersion(existing)
 	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
 	if err != nil {
-		exit.WithError("Failed to generate config", err)
+		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
 	}

 	// This is about as far as we can go without overwriting config files
 	if viper.GetBool(dryRun) {
 		out.T(out.DryRun, `dry-run validation complete!`)
-		return
+		os.Exit(0)
 	}

 	if driver.IsVM(driverName) {
 		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
 		if err != nil {
-			exit.WithError("Failed to cache ISO", err)
+			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
 		}
 		cc.MinikubeISO = url
 	}
@@ -201,9 +249,29 @@ func runStart(cmd *cobra.Command, args []string) {
 		}
 	}

-	kubeconfig, err := node.Start(cc, n, existingAddons, true)
+	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true)
 	if err != nil {
-		kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
+		return node.Starter{}, err
+	}
+
+	return node.Starter{
+		Runner:         mRunner,
+		PreExists:      preExists,
+		MachineAPI:     mAPI,
+		Host:           host,
+		ExistingAddons: existingAddons,
+		Cfg:            &cc,
+		Node:           &n,
+	}, nil
+}
+
+func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
+	kubeconfig, err := node.Start(starter, true)
+	if err != nil {
+		kubeconfig, err = maybeDeleteAndRetry(*starter.Cfg, *starter.Node, starter.ExistingAddons, err)
+		if err != nil {
+			return nil, err
+		}
 	}

 	numNodes := viper.GetInt(nodes)
@@ -211,7 +279,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		numNodes = len(existing.Nodes)
 	}
 	if numNodes > 1 {
-		if driver.BareMetal(driverName) {
+		if driver.BareMetal(starter.Cfg.Driver) {
 			exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.")
 		} else {
 			for i := 1; i < numNodes; i++ {
@@ -220,20 +288,18 @@ func runStart(cmd *cobra.Command, args []string) {
 					Name:              nodeName,
 					Worker:            true,
 					ControlPlane:      false,
-					KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
+					KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
 				}
 				out.Ln("") // extra newline for clarity on the command line
-				err := node.Add(&cc, n)
+				err := node.Add(starter.Cfg, n)
 				if err != nil {
-					exit.WithError("adding node", err)
+					return nil, errors.Wrap(err, "adding node")
 				}
 			}
 		}
 	}

-	if err := showKubectlInfo(kubeconfig, cc.KubernetesConfig.KubernetesVersion, cc.Name); err != nil {
-		glog.Errorf("kubectl info: %v", err)
-	}
+	return kubeconfig, nil
 }

 func updateDriver(driverName string) {
@@ -303,7 +369,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
 	return nil
 }

-func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
+func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
 	if viper.GetBool(deleteOnFailure) {
 		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
 		// Start failed, delete the cluster and try again
@@ -318,21 +384,35 @@ func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
 		}

 		var kubeconfig *kubeconfig.Settings
-		for _, v := range cc.Nodes {
-			k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
-			if v.ControlPlane {
+		for _, n := range cc.Nodes {
+			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane)
+			s := node.Starter{
+				Runner:         r,
+				PreExists:      p,
+				MachineAPI:     m,
+				Host:           h,
+				Cfg:            &cc,
+				Node:           &n,
+				ExistingAddons: existingAddons,
+			}
+			if err != nil {
+				// Ok we failed again, let's bail
+				return nil, err
+			}
+
+			k, err := node.Start(s, n.ControlPlane)
+			if n.ControlPlane {
 				kubeconfig = k
 			}
 			if err != nil {
 				// Ok we failed again, let's bail
-				exit.WithError("Start failed after cluster deletion", err)
+				return nil, err
 			}
 		}
-		return kubeconfig
+		return kubeconfig, nil
 	}

 	// Don't delete the cluster unless they ask
-	exit.WithError("startup failed", originalErr)
-	return nil
+	return nil, errors.Wrap(originalErr, "startup failed")
 }
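With --delete-on-failure set, maybeDeleteAndRetry now re-provisions and restarts every node in the profile and bubbles the first error up instead of exiting; without the flag it wraps and returns the original error. A compressed, runnable sketch of that contract (stubs replace the real delete/provision/start machinery):

package main

import (
	"errors"
	"fmt"
)

var deleteOnFailure = true // stands in for viper.GetBool(deleteOnFailure)

func deleteCluster() { fmt.Println("deleting cluster") }

func startNode(name string) error {
	fmt.Println("re-provisioning and starting node", name)
	return nil
}

// retryAfterDelete mirrors the new maybeDeleteAndRetry shape: it
// returns an error rather than calling os.Exit, so the callers in
// cmd/ decide whether the process dies.
func retryAfterDelete(nodes []string, originalErr error) error {
	if !deleteOnFailure {
		// Don't delete the cluster unless they ask.
		return fmt.Errorf("startup failed: %w", originalErr)
	}
	deleteCluster()
	for _, n := range nodes {
		if err := startNode(n); err != nil {
			return err // failed again, bail
		}
	}
	return nil
}

func main() {
	fmt.Println("result:", retryAfterDelete([]string{"m01", "m02"}, errors.New("boot failure")))
}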
@@ -360,7 +440,7 @@ func kubectlVersion(path string) (string, error) {
 	return cv.ClientVersion.GitVersion, nil
 }

-func selectDriver(existing *config.ClusterConfig) registry.DriverState {
+func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
 	// Technically unrelated, but important to perform before detection
 	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
@@ -369,7 +449,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
 		old := hostDriver(existing)
 		ds := driver.Status(old)
 		out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
-		return ds
+		return ds, nil, true
 	}

 	// Default to looking at the new driver parameter
@@ -389,7 +469,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
 			exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
 		}
 		out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
-		return ds
+		return ds, nil, true
 	}

 	// Fallback to old driver parameter
@@ -399,7 +479,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
 			exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
 		}
 		out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
-		return ds
+		return ds, nil, true
 	}

 	choices := driver.Choices(viper.GetBool("vm"))
@@ -422,7 +502,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
 	} else {
 		out.T(out.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
 	}
-	return pick
+	return pick, alts, false
 }

 // hostDriver returns the actual driver used by a libmachine host, which can differ from our config
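selectDriver now answers three questions at once: which driver to use, which viable candidates remain for fallback, and whether the user pinned the choice. Every explicit path (existing profile, the new driver flag, the legacy one) returns nil alternates and true, so runStart never overrides an explicit selection. A toy version of that shape:

package main

import "fmt"

type driverState struct{ Name string }

// pickDriver mirrors the new selectDriver return values: the pick,
// the remaining candidates, and whether the user specified it (in
// which case there is nothing to fall back to).
func pickDriver(userChoice string, detected []driverState) (driverState, []driverState, bool) {
	if userChoice != "" {
		return driverState{Name: userChoice}, nil, true
	}
	return detected[0], detected[1:], false
}

func main() {
	ds, alts, specified := pickDriver("", []driverState{{Name: "docker"}, {Name: "virtualbox"}})
	fmt.Println(ds.Name, alts, specified) // docker [{virtualbox}] false
}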

pkg/minikube/exit/exit.go

@@ -18,7 +18,6 @@ limitations under the License.
 package exit

 import (
-	"fmt"
 	"os"
 	"runtime"
 	"runtime/debug"
@@ -26,7 +25,6 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/minikube/problem"
-	"k8s.io/minikube/pkg/minikube/translate"
 )

 // Exit codes based on sysexits(3)
@@ -41,9 +39,6 @@ const (
 	IO          = 74 // IO represents an I/O error
 	Config      = 78 // Config represents an unconfigured or misconfigured state
 	Permissions = 77 // Permissions represents a permissions error
-
-	// MaxLogEntries controls the number of log entries to show for each source
-	MaxLogEntries = 3
 )

 // UsageT outputs a templated usage error and exits with error code 64
@@ -65,7 +60,7 @@ func WithError(msg string, err error) {
 	if p != nil {
 		WithProblem(msg, err, p)
 	}
-	displayError(msg, err)
+	out.DisplayError(msg, err)
 	os.Exit(Software)
 }

@@ -81,29 +76,3 @@ func WithProblem(msg string, err error, p *problem.Problem) {
 	}
 	os.Exit(Config)
 }
-
-// WithLogEntries outputs an error along with any important log entries, and exits.
-func WithLogEntries(msg string, err error, entries map[string][]string) {
-	displayError(msg, err)
-
-	for name, lines := range entries {
-		out.FailureT("Problems detected in {{.entry}}:", out.V{"entry": name})
-		if len(lines) > MaxLogEntries {
-			lines = lines[:MaxLogEntries]
-		}
-		for _, l := range lines {
-			out.T(out.LogEntry, l)
-		}
-	}
-	os.Exit(Software)
-}
-
-func displayError(msg string, err error) {
-	// use Warning because Error will display a duplicate message to stderr
-	glog.Warningf(fmt.Sprintf("%s: %v", msg, err))
-	out.ErrT(out.Empty, "")
-	out.FatalT("{{.msg}}: {{.err}}", out.V{"msg": translate.T(msg), "err": err})
-	out.ErrT(out.Empty, "")
-	out.ErrT(out.Sad, "minikube is exiting due to an error. If the above message is not useful, open an issue:")
-	out.ErrT(out.URL, "https://github.com/kubernetes/minikube/issues/new/choose")
-}
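The two removed helpers reappear nearly verbatim in pkg/minikube/out at the bottom of this diff, minus the os.Exit calls. For callers that must keep running, the pattern becomes print-then-return; a hedged sketch of a call site inside the minikube tree (doStart and findProblems are hypothetical stand-ins for the bootstrapper calls):

package mypkg // hypothetical example package

import (
	"errors"

	"k8s.io/minikube/pkg/minikube/out"
)

func doStart() error                    { return errors.New("kubeadm init failed") }
func findProblems() map[string][]string { return map[string][]string{"kubelet": {"..."}} }

// startCluster shows the new division of labor: out.LogEntries prints
// the diagnostics, the error flows upward, and only cmd/ may exit.
func startCluster() error {
	if err := doStart(); err != nil {
		out.LogEntries("Error starting cluster", err, findProblems())
		return err // was exit.WithLogEntries, which called os.Exit itself
	}
	return nil
}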

pkg/minikube/node/cache.go

@@ -80,7 +80,6 @@ func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) {
 	}
 	out.T(out.Check, "Download complete!")
 	os.Exit(0)
-
 }

 // CacheKubectlBinary caches the kubectl binary

pkg/minikube/node/node.go

@@ -39,7 +39,21 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
 		return errors.Wrap(err, "save node")
 	}

-	_, err := Start(*cc, n, nil, false)
+	r, p, m, h, err := Provision(cc, &n, false)
+	if err != nil {
+		return err
+	}
+
+	s := Starter{
+		Runner:         r,
+		PreExists:      p,
+		MachineAPI:     m,
+		Host:           h,
+		Cfg:            cc,
+		Node:           &n,
+		ExistingAddons: nil,
+	}
+
+	_, err = Start(s, false)
 	return err
 }

pkg/minikube/node/start.go

@@ -59,58 +59,45 @@ import (
 const waitTimeout = "wait-timeout"

+var (
+	kicGroup   errgroup.Group
+	cacheGroup errgroup.Group
+)
+
+// Starter is a struct with all the necessary information to start a node
+type Starter struct {
+	Runner         command.Runner
+	PreExists      bool
+	MachineAPI     libmachine.API
+	Host           *host.Host
+	Cfg            *config.ClusterConfig
+	Node           *config.Node
+	ExistingAddons map[string]bool
+}
+
 // Start spins up a guest and starts the kubernetes node.
-func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
-	name := driver.MachineName(cc, n)
-	if apiServer {
-		out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
-	} else {
-		out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
-	}
-
-	var kicGroup errgroup.Group
-	if driver.IsKIC(cc.Driver) {
-		beginDownloadKicArtifacts(&kicGroup)
-	}
-
-	var cacheGroup errgroup.Group
-	if !driver.BareMetal(cc.Driver) {
-		beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
-	}
-
-	// Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot.
-	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
-	if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil {
-		exit.WithError("Failed to save config", err)
-	}
-
-	handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
-	waitDownloadKicArtifacts(&kicGroup)
-
-	mRunner, preExists, machineAPI, host := startMachine(&cc, &n)
-	defer machineAPI.Close()
+func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {

 	// wait for preloaded tarball to finish downloading before configuring runtimes
 	waitCacheRequiredImages(&cacheGroup)

-	sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
+	sv, err := util.ParseKubernetesVersion(starter.Node.KubernetesVersion)
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to parse kubernetes version")
 	}

 	// configure the runtime (docker, containerd, crio)
-	cr := configureRuntimes(mRunner, cc, sv)
-	showVersionInfo(n.KubernetesVersion, cr)
+	cr := configureRuntimes(starter.Runner, *starter.Cfg, sv)
+	showVersionInfo(starter.Node.KubernetesVersion, cr)

 	// ssh should be set up by now
 	// switch to using ssh runner since it is faster
-	if driver.IsKIC(cc.Driver) {
-		sshRunner, err := machine.SSHRunner(host)
+	if driver.IsKIC(starter.Cfg.Driver) {
+		sshRunner, err := machine.SSHRunner(starter.Host)
 		if err != nil {
 			glog.Infof("error getting ssh runner: %v", err)
 		} else {
 			glog.Infof("Using ssh runner for kic...")
-			mRunner = sshRunner
+			starter.Runner = sshRunner
 		}
 	}
@@ -118,17 +105,18 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
 	var kcs *kubeconfig.Settings
 	if apiServer {
 		// Must be written before bootstrap, otherwise health checks may flake due to stale IP
-		kcs = setupKubeconfig(host, &cc, &n, cc.Name)
+		kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name)
 		if err != nil {
 			return nil, errors.Wrap(err, "Failed to setup kubeconfig")
 		}

 		// setup kubeadm (must come after setupKubeconfig)
-		bs = setupKubeAdm(machineAPI, cc, n)
-		err = bs.StartCluster(cc)
+		bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node)
+		err = bs.StartCluster(*starter.Cfg)
 		if err != nil {
-			exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner))
+			out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner))
+			return nil, err
 		}

 		// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
@@ -136,12 +124,12 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
 			return nil, errors.Wrap(err, "Failed to update kubeconfig file.")
 		}
 	} else {
-		bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
+		bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, *starter.Node)
 		if err != nil {
 			return nil, errors.Wrap(err, "Failed to get bootstrapper")
 		}

-		if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
+		if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil {
 			return nil, errors.Wrap(err, "setting up certs")
 		}
 	}
@@ -158,43 +146,43 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
 	}()

 	// enable addons, both old and new!
-	if existingAddons != nil {
-		go addons.Start(&wg, &cc, existingAddons, config.AddonList)
+	if starter.ExistingAddons != nil {
+		go addons.Start(&wg, starter.Cfg, starter.ExistingAddons, config.AddonList)
 	}

 	if apiServer {
 		// special ops for none , like change minikube directory.
 		// multinode super doesn't work on the none driver
-		if cc.Driver == driver.None && len(cc.Nodes) == 1 {
+		if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 {
 			prepareNone()
 		}

 		// Skip pre-existing, because we already waited for health
-		if kverify.ShouldWait(cc.VerifyComponents) && !preExists {
-			if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
+		if kverify.ShouldWait(starter.Cfg.VerifyComponents) && !starter.PreExists {
+			if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
 				return nil, errors.Wrap(err, "Wait failed")
 			}
 		}
 	} else {
-		if err := bs.UpdateNode(cc, n, cr); err != nil {
+		if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
 			return nil, errors.Wrap(err, "Updating node")
 		}

-		cp, err := config.PrimaryControlPlane(&cc)
+		cp, err := config.PrimaryControlPlane(starter.Cfg)
 		if err != nil {
 			return nil, errors.Wrap(err, "Getting primary control plane")
 		}
-		cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
+		cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cp)
 		if err != nil {
 			return nil, errors.Wrap(err, "Getting bootstrapper")
 		}

-		joinCmd, err := cpBs.GenerateToken(cc)
+		joinCmd, err := cpBs.GenerateToken(*starter.Cfg)
 		if err != nil {
 			return nil, errors.Wrap(err, "generating join token")
 		}

-		if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
+		if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil {
 			return nil, errors.Wrap(err, "joining cluster")
 		}
 	}
@@ -202,7 +190,38 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
 	wg.Wait()

 	// Write enabled addons to the config before completion
-	return kcs, config.Write(viper.GetString(config.ProfileName), &cc)
+	return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
+}
+
+// Provision provisions the machine/container for the node
+func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool) (command.Runner, bool, libmachine.API, *host.Host, error) {
+	name := driver.MachineName(*cc, *n)
+	if apiServer {
+		out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
+	} else {
+		out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
+	}
+
+	if driver.IsKIC(cc.Driver) {
+		beginDownloadKicArtifacts(&kicGroup)
+	}
+
+	if !driver.BareMetal(cc.Driver) {
+		beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
+	}
+
+	// Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot.
+	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
+	if err := config.SaveProfile(viper.GetString(config.ProfileName), cc); err != nil {
+		return nil, false, nil, nil, errors.Wrap(err, "Failed to save config")
+	}
+
+	handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
+	waitDownloadKicArtifacts(&kicGroup)
+
+	return startMachine(cc, n)
 }

 // ConfigureRuntimes does what needs to happen to get a runtime going.
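kicGroup and cacheGroup had to become package-level (see the var block at the top of this file) because the downloads now start in Provision but are awaited in Start, across a function boundary. A self-contained illustration of sharing an errgroup that way:

package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// A package-level group, mirroring kicGroup/cacheGroup: one function
// starts background downloads, another waits for them later.
var downloads errgroup.Group

func beginDownloads() { // Provision's half
	downloads.Go(func() error {
		time.Sleep(100 * time.Millisecond) // pretend to fetch images
		return nil
	})
}

func waitDownloads() error { return downloads.Wait() } // Start's half

func main() {
	beginDownloads()
	if err := waitDownloads(); err != nil {
		fmt.Println("download failed:", err)
		return
	}
	fmt.Println("caches ready")
}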
@@ -311,18 +330,24 @@ func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) {
 }

 // StartMachine starts a VM
-func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
+func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) {
 	m, err := machine.NewAPIClient()
 	if err != nil {
-		exit.WithError("Failed to get machine client", err)
+		return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client")
 	}
-	host, preExists = startHost(m, *cfg, *node)
+	host, preExists, err = startHost(m, *cfg, *node)
+	if err != nil {
+		return runner, preExists, m, host, errors.Wrap(err, "Failed to start host")
+	}

 	runner, err = machine.CommandRunner(host)
 	if err != nil {
-		exit.WithError("Failed to get command runner", err)
+		return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner")
 	}

-	ip := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
+	ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
+	if err != nil {
+		return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network")
+	}

 	// Bypass proxy for minikube's vm host ip
 	err = proxy.ExcludeIP(ip)
@@ -334,17 +359,17 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
 	node.IP = ip
 	err = config.SaveNode(cfg, node)
 	if err != nil {
-		exit.WithError("saving node", err)
+		return runner, preExists, m, host, errors.Wrap(err, "saving node")
 	}

-	return runner, preExists, m, host
+	return runner, preExists, m, host, err
 }

 // startHost starts a new minikube host using a VM or None
-func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool) {
+func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool, error) {
 	host, exists, err := machine.StartHost(api, cc, n)
 	if err == nil {
-		return host, exists
+		return host, exists, nil
 	}

 	out.ErrT(out.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err})
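startHost keeps its warn-and-retry-once shape, but a second failure now comes back as an error plus a hint instead of killing the process. The skeleton of that pattern, self-contained (bootOnce stands in for machine.StartHost):

package main

import (
	"errors"
	"fmt"
)

var attempts int

// bootOnce stands in for machine.StartHost.
func bootOnce() error {
	attempts++
	if attempts < 2 {
		return errors.New("transient hypervisor hiccup")
	}
	return nil
}

// bootHost mirrors the new startHost contract: warn and retry once,
// and on a second failure return the error instead of exiting.
func bootHost() error {
	err := bootOnce()
	if err == nil {
		return nil
	}
	fmt.Println("StartHost failed, but will try again:", err)
	return bootOnce()
}

func main() {
	if err := bootHost(); err != nil {
		fmt.Println("failed to start host:", err)
		return
	}
	fmt.Println("host up after", attempts, "attempts")
}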
@@ -361,20 +386,20 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool) {
 	host, exists, err = machine.StartHost(api, cc, n)
 	if err == nil {
-		return host, exists
+		return host, exists, nil
 	}

 	// Don't use host.Driver to avoid nil pointer deref
 	drv := cc.Driver
-	exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err)
-	return host, exists
+	out.ErrT(out.Sad, `Failed to start {{.driver}} {{.driver_type}}. "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "start"), "error": err})
+	return host, exists, err
 }

 // validateNetwork tries to catch network problems as soon as possible
-func validateNetwork(h *host.Host, r command.Runner, imageRepository string) string {
+func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (string, error) {
 	ip, err := h.Driver.GetIP()
 	if err != nil {
-		exit.WithError("Unable to get VM IP address", err)
+		return ip, err
 	}

 	optSeen := false
@@ -396,17 +421,19 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) string {
 	}

 	if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
-		trySSH(h, ip)
+		if err := trySSH(h, ip); err != nil {
+			return ip, err
+		}
 	}

 	// Non-blocking
 	go tryRegistry(r, h.Driver.DriverName(), imageRepository)
-	return ip
+	return ip, nil
 }

-func trySSH(h *host.Host, ip string) {
+func trySSH(h *host.Host, ip string) error {
 	if viper.GetBool("force") {
-		return
+		return nil
 	}

 	sshAddr := net.JoinHostPort(ip, "22")
@@ -422,8 +449,9 @@ func trySSH(h *host.Host, ip string) {
 		return nil
 	}

-	if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
-		exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
+	err := retry.Expo(dial, time.Second, 13*time.Second)
+	if err != nil {
+		out.ErrT(out.FailureType, `minikube is unable to connect to the VM: {{.error}}

 	This is likely due to one of two reasons:
@@ -439,6 +467,8 @@ func trySSH(h *host.Host, ip string) {
 	- Use --force to override this connectivity check
 	`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
 	}
+
+	return err
 }

 // tryRegistry tries to connect to the image repository
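trySSH still probes port 22 with exponential backoff (retry.Expo with a one-second base and a 13-second cap), but the verdict is now returned so validateNetwork and startMachine can propagate it. A standalone approximation of the probe, with a hand-rolled backoff in place of minikube's retry.Expo:

package main

import (
	"fmt"
	"net"
	"time"
)

// dialSSH probes the guest's SSH port the way trySSH does.
func dialSSH(ip string) error {
	sshAddr := net.JoinHostPort(ip, "22")
	var err error
	for delay := time.Second; delay <= 8*time.Second; delay *= 2 {
		var conn net.Conn
		conn, err = net.DialTimeout("tcp", sshAddr, delay)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(delay) // exponential spacing, like retry.Expo
	}
	return fmt.Errorf("unable to connect to the VM at %s: %w", sshAddr, err)
}

func main() {
	if err := dialSSH("192.168.64.2"); err != nil {
		fmt.Println(err) // callers now see this instead of os.Exit(exit.IO)
	}
}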

pkg/minikube/out/out.go

@@ -26,6 +26,7 @@ import (
 	"github.com/golang/glog"
 	isatty "github.com/mattn/go-isatty"
+	"k8s.io/minikube/pkg/minikube/translate"
 )

 // By design, this package uses global references to language and output objects, in preference
@@ -51,6 +52,9 @@ var (
 	OverrideEnv = "MINIKUBE_IN_STYLE"
 )

+// MaxLogEntries controls the number of log entries to show for each source
+const MaxLogEntries = 3
+
 // fdWriter is the subset of file.File that implements io.Writer and Fd()
 type fdWriter interface {
 	io.Writer
@@ -175,3 +179,29 @@ func wantsColor(fd uintptr) bool {
 	glog.Infof("isatty.IsTerminal(%d) = %v\n", fd, isT)
 	return isT
 }
+
+// LogEntries outputs an error along with any important log entries.
+func LogEntries(msg string, err error, entries map[string][]string) {
+	DisplayError(msg, err)
+
+	for name, lines := range entries {
+		T(FailureType, "Problems detected in {{.entry}}:", V{"entry": name})
+		if len(lines) > MaxLogEntries {
+			lines = lines[:MaxLogEntries]
+		}
+		for _, l := range lines {
+			T(LogEntry, l)
+		}
+	}
+}
+
+// DisplayError prints the error and displays the standard minikube error messaging
+func DisplayError(msg string, err error) {
+	// use Warning because Error will display a duplicate message to stderr
+	glog.Warningf(fmt.Sprintf("%s: %v", msg, err))
+	ErrT(Empty, "")
+	FatalT("{{.msg}}: {{.err}}", V{"msg": translate.T(msg), "err": err})
+	ErrT(Empty, "")
+	ErrT(Sad, "minikube is exiting due to an error. If the above message is not useful, open an issue:")
+	ErrT(URL, "https://github.com/kubernetes/minikube/issues/new/choose")
+}
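Why the printers land here rather than staying in exit: exit already imports out (visible in its import block above), so the dependency can only point this way, and parking DisplayError and LogEntries in out lets exit.WithError and non-exiting code share one implementation. The resulting call chain, condensed from this diff:

// pkg/minikube/exit, after this PR:
//
//	func WithError(msg string, err error) {
//		...
//		out.DisplayError(msg, err) // shared printer lives in pkg/minikube/out
//		os.Exit(Software)          // exiting stays in exit
//	}
//
// Code that must keep running calls out.DisplayError or out.LogEntries
// directly and returns its error instead.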