Merge pull request #7345 from sharifelgamal/delete-on-failure
add delete-on-failure flag

commit 5806bb01f2
@@ -18,10 +18,8 @@ package cmd

import (
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
    "k8s.io/minikube/pkg/minikube/config"
    "k8s.io/minikube/pkg/minikube/driver"
    "k8s.io/minikube/pkg/minikube/exit"
    "k8s.io/minikube/pkg/minikube/mustload"
    "k8s.io/minikube/pkg/minikube/node"
    "k8s.io/minikube/pkg/minikube/out"

@@ -56,7 +54,7 @@ var nodeAddCmd = &cobra.Command{
        }

        if err := node.Add(cc, n); err != nil {
-           exit.WithError("Error adding node to cluster", err)
+           maybeDeleteAndRetry(*cc, n, nil, err)
        }

        out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})

@@ -64,13 +62,10 @@ var nodeAddCmd = &cobra.Command{
}

func init() {
+   // TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import
    nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
    nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
-   //We should figure out which of these flags to actually import
-   startCmd.Flags().Visit(
-       func(f *pflag.Flag) {
-           nodeAddCmd.Flags().AddFlag(f)
-       },
-   )
+   nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")

    nodeCmd.AddCommand(nodeAddCmd)
}

@@ -49,12 +49,15 @@ var nodeStartCmd = &cobra.Command{
            exit.WithError("retrieving node", err)
        }

        // Start it up baby
-       node.Start(*cc, *n, nil, false)
+       _, err = node.Start(*cc, *n, nil, false)
+       if err != nil {
+           maybeDeleteAndRetry(*cc, *n, nil, err)
+       }
    },
}

func init() {
    nodeStartCmd.Flags().String("name", "", "The name of the node to start")
+   nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
    nodeCmd.AddCommand(nodeStartCmd)
}

@@ -124,6 +124,7 @@ const (
    natNicType = "nat-nic-type"
    nodes = "nodes"
    preload = "preload"
+   deleteOnFailure = "delete-on-failure"
)

var (

@@ -177,6 +178,7 @@ func initMinikubeFlags() {
    startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
    startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
    startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
+   startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
}

// initKubernetesFlags inits the commandline flags for kubernetes related options
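
Editorial note: the same delete-on-failure key is registered here on startCmd, and in the hunks above on the node add and node start sub-commands, then read back through viper in maybeDeleteAndRetry below. A minimal standalone sketch of that register-then-read pattern (a hypothetical toy command, not minikube code; the explicit BindPFlags call stands in for the flag-to-viper wiring minikube does elsewhere):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

// Same style as the diff: the flag name doubles as the viper key.
const deleteOnFailure = "delete-on-failure"

func main() {
    cmd := &cobra.Command{
        Use: "start",
        Run: func(cmd *cobra.Command, args []string) {
            // Mirrors viper.GetBool(deleteOnFailure) in maybeDeleteAndRetry.
            fmt.Println("delete-on-failure:", viper.GetBool(deleteOnFailure))
        },
    }
    cmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")

    // Bind the command's pflag set so viper can resolve the value set on the CLI.
    if err := viper.BindPFlags(cmd.Flags()); err != nil {
        fmt.Println("bind flags:", err)
    }

    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
    }
}
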
@@ -353,7 +355,10 @@ func runStart(cmd *cobra.Command, args []string) {
        }
    }

-   kubeconfig := node.Start(cc, n, existingAddons, true)
+   kubeconfig, err := node.Start(cc, n, existingAddons, true)
+   if err != nil {
+       kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
+   }

    numNodes := viper.GetInt(nodes)
    if numNodes == 1 && existing != nil {

@@ -457,6 +462,38 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
    return nil
}

+func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
+   if viper.GetBool(deleteOnFailure) {
+       out.T(out.Warning, "Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
+       // Start failed, delete the cluster and try again
+       profile, err := config.LoadProfile(cc.Name)
+       if err != nil {
+           out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name})
+       }
+
+       err = deleteProfile(profile)
+       if err != nil {
+           out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name})
+       }
+
+       var kubeconfig *kubeconfig.Settings
+       for _, v := range cc.Nodes {
+           k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
+           if v.ControlPlane {
+               kubeconfig = k
+           }
+           if err != nil {
+               // Ok we failed again, let's bail
+               exit.WithError("Start failed after cluster deletion", err)
+           }
+       }
+       return kubeconfig
+   }
+   // Don't delete the cluster unless they ask
+   exit.WithError("startup failed", originalErr)
+   return nil
+}
+
func kubectlVersion(path string) (string, error) {
    j, err := exec.Command(path, "version", "--client", "--output=json").Output()
    if err != nil {
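
Editorial note: maybeDeleteAndRetry above is the heart of the feature. It only acts when the user opted in, deletes the whole profile, restarts every node (keeping the control-plane node's kubeconfig settings as the return value), and bails out for good if the second attempt also fails. A compressed standalone sketch of that shape, with hypothetical deleteCluster/startNode helpers standing in for minikube's deleteProfile and node.Start:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

// Hypothetical stand-ins for minikube's deleteProfile and node.Start.
func deleteCluster(name string) error { return nil }

func startNode(name string, controlPlane bool) error { return nil }

// retryAfterDelete mirrors the shape of maybeDeleteAndRetry: do nothing unless
// the user opted in, delete the cluster, then restart every node and surface a
// hard failure if the second attempt also fails.
func retryAfterDelete(optIn bool, nodes []string, originalErr error) error {
    if !optIn {
        // Don't delete the cluster unless they ask.
        return originalErr
    }
    if err := deleteCluster("example"); err != nil {
        fmt.Println("failed to delete cluster, proceeding with retry anyway:", err)
    }
    for i, name := range nodes {
        controlPlane := i == 0 // assume the first node is the control plane
        if err := startNode(name, controlPlane); err != nil {
            return errors.Wrap(err, "start failed after cluster deletion")
        }
    }
    return nil
}

func main() {
    err := retryAfterDelete(true, []string{"m01", "m02"}, errors.New("original start failure"))
    fmt.Println("retry result:", err)
}
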
@@ -39,9 +39,8 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
        return errors.Wrap(err, "save node")
    }

-   // TODO: Start should return an error rather than calling exit!
-   Start(*cc, n, nil, false)
-   return nil
+   _, err := Start(*cc, n, nil, false)
+   return err
}

// Delete stops and deletes the given node from the given cluster

@@ -65,7 +65,7 @@ const (
)

// Start spins up a guest and starts the kubernetes node.
-func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings {
+func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
    cp := ""
    if apiServer {
        cp = "control plane "

@@ -100,7 +100,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo

    sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
    if err != nil {
-       exit.WithError("Failed to parse kubernetes version", err)
+       return nil, errors.Wrap(err, "Failed to parse kubernetes version")
    }

    // configure the runtime (docker, containerd, crio)

@@ -113,7 +113,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
    // Must be written before bootstrap, otherwise health checks may flake due to stale IP
    kcs = setupKubeconfig(host, &cc, &n, cc.Name)
    if err != nil {
-       exit.WithError("Failed to setup kubeconfig", err)
+       return nil, errors.Wrap(err, "Failed to setup kubeconfig")
    }

    // setup kubeadm (must come after setupKubeconfig)

@@ -125,16 +125,16 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo

        // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
        if err := kubeconfig.Update(kcs); err != nil {
-           exit.WithError("Failed to update kubeconfig file.", err)
+           return nil, errors.Wrap(err, "Failed to update kubeconfig file.")
        }
    } else {
        bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
        if err != nil {
-           exit.WithError("Failed to get bootstrapper", err)
+           return nil, errors.Wrap(err, "Failed to get bootstrapper")
        }

        if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
-           exit.WithError("setting up certs", err)
+           return nil, errors.Wrap(err, "setting up certs")
        }
    }

@@ -159,34 +159,34 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
        // Skip pre-existing, because we already waited for health
        if viper.GetBool(waitUntilHealthy) && !preExists {
            if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
-               exit.WithError("Wait failed", err)
+               return nil, errors.Wrap(err, "Wait failed")
            }
        }
    } else {
        if err := bs.UpdateNode(cc, n, cr); err != nil {
-           exit.WithError("Updating node", err)
+           return nil, errors.Wrap(err, "Updating node")
        }

        cp, err := config.PrimaryControlPlane(&cc)
        if err != nil {
-           exit.WithError("Getting primary control plane", err)
+           return nil, errors.Wrap(err, "Getting primary control plane")
        }
        cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
        if err != nil {
-           exit.WithError("Getting bootstrapper", err)
+           return nil, errors.Wrap(err, "Getting bootstrapper")
        }

        joinCmd, err := cpBs.GenerateToken(cc)
        if err != nil {
-           exit.WithError("generating join token", err)
+           return nil, errors.Wrap(err, "generating join token")
        }

        if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
-           exit.WithError("joining cluster", err)
+           return nil, errors.Wrap(err, "joining cluster")
        }
    }

-   return kcs
+   return kcs, nil
}

// ConfigureRuntimes does what needs to happen to get a runtime going.
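
Editorial note: the recurring edit in the node.Start hunks above is the same one-line swap, repeated at each failure site: exit.WithError, which terminates the process, becomes a wrapped error returned to the caller, so runStart, node add, and node start can decide whether to delete and retry. A small illustration of that wrap-and-return style (hypothetical helper names; only the github.com/pkg/errors API already used in the diff is assumed):

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

// parseVersion is a hypothetical stand-in for util.ParseKubernetesVersion.
func parseVersion(v string) error {
    if v == "" {
        return errors.New("empty version")
    }
    return nil
}

// start returns an annotated error instead of exiting, as node.Start now does.
func start(version string) error {
    if err := parseVersion(version); err != nil {
        return errors.Wrap(err, "Failed to parse kubernetes version")
    }
    return nil
}

func main() {
    if err := start(""); err != nil {
        fmt.Println(err)               // Failed to parse kubernetes version: empty version
        fmt.Println(errors.Cause(err)) // empty version
    }
}
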