mostly moving code around and adding UpdateNode

commit f22efd871a
parent 4af82913e1
@@ -49,16 +49,11 @@ var nodeAddCmd = &cobra.Command{
 		}

 		out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile})

-		n, err := node.Add(mc, name, cp, worker, "", profile)
+		err = node.Add(mc, name, cp, worker, "", profile)
 		if err != nil {
 			exit.WithError("Error adding node to cluster", err)
 		}

-		err = node.Start(*mc, *n, false, nil)
-		if err != nil {
-			exit.WithError("Error starting node", err)
-		}
-
 		out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile})
 	},
 }
@@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{
 		}

 		// Start it up baby
-		err = node.Start(*cc, *n, true, nil)
+		err = node.Start(*cc, *n, nil)
 		if err != nil {
 			out.FatalT("Failed to start node {{.name}}", out.V{"name": name})
 		}
@@ -118,6 +118,7 @@ const (
 	autoUpdate = "auto-update-drivers"
 	hostOnlyNicType = "host-only-nic-type"
 	natNicType = "nat-nic-type"
+	nodes = "nodes"
 )

 var (
@@ -160,7 +161,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")
 	startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
 	startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
-	startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
+	startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
 	startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
 	startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
 	startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
@@ -169,12 +170,13 @@ func initMinikubeFlags() {
 	startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
 	startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
 	startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
+	startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
 }

 // initKubernetesFlags inits the commandline flags for kubernetes related options
 func initKubernetesFlags() {
 	startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
-	startCmd.Flags().Var(&cluster.ExtraOptions, "extra-config",
+	startCmd.Flags().Var(&config.ExtraOptions, "extra-config",
 		`A set of key=value pairs that describe configuration that may be passed to different components.
 		The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
 		Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler
@@ -226,8 +228,8 @@ func initNetworkingFlags() {
 	startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
 	startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.")
 	startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
-	startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
-	startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
+	startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
+	startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
 }

 // startCmd represents the start command
@@ -335,7 +337,14 @@ func runStart(cmd *cobra.Command, args []string) {
 			existingAddons = existing.Addons
 		}
 	}
-	kubeconfig, err := node.Start(mc, n, true, existingAddons)
+
+	// Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot.
+	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
+	if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil {
+		exit.WithError("Failed to save config", err)
+	}
+
+	kubeconfig, err := cluster.InitialSetup(mc, n, existingAddons)
 	if err != nil {
 		exit.WithError("Starting node", err)
 	}
@@ -343,6 +352,14 @@ func runStart(cmd *cobra.Command, args []string) {
 	if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
 		glog.Errorf("kubectl info: %v", err)
 	}
+
+	numNodes := viper.GetInt(nodes)
+	if numNodes > 1 {
+		for i := 0; i < numNodes-1; i++ {
+			nodeName := fmt.Sprintf("%s%d", n.Name, i+1)
+			node.Add(&mc, nodeName, false, true, "", "")
+		}
+	}
 }

 func updateDriver(driverName string) {
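With the nodes flag registered above, runStart derives extra node names from the primary's name plus an index, so a two-node cluster is requested with `minikube start --nodes 2` (or `-n 2`). Note that this loop drops node.Add's error on the floor; a sketch of a stricter variant appears after the node.Add hunk near the end of this diff.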
@@ -691,7 +708,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 	validateCPUCount(driver.BareMetal(drvName))

 	// check that kubeadm extra args contain only whitelisted parameters
-	for param := range cluster.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
+	for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
 		if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
 			!config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
 			exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
@@ -791,8 +808,8 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
 		NFSShare: viper.GetStringSlice(nfsShare),
 		NFSSharesRoot: viper.GetString(nfsSharesRoot),
-		DockerEnv: node.DockerEnv,
-		DockerOpt: node.DockerOpt,
+		DockerEnv: config.DockerEnv,
+		DockerOpt: config.DockerOpt,
 		InsecureRegistry: insecureRegistry,
 		RegistryMirror: registryMirror,
 		HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
@@ -824,7 +841,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 			NetworkPlugin: selectedNetworkPlugin,
 			ServiceCIDR: viper.GetString(serviceCIDR),
 			ImageRepository: repository,
-			ExtraOptions: cluster.ExtraOptions,
+			ExtraOptions: config.ExtraOptions,
 			ShouldLoadCachedImages: viper.GetBool(cacheImages),
 			EnableDefaultCNI: selectedEnableDefaultCNI,
 		},
@@ -846,7 +863,7 @@ func setDockerProxy() {
 				continue
 			}
 		}
-		node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v))
+		config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v))
 	}
 }
}
@@ -858,7 +875,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
 	if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 {
 		for _, eo := range hints.ExtraOptions {
 			glog.Infof("auto setting extra-config to %q.", eo)
-			err = cluster.ExtraOptions.Set(eo)
+			err = config.ExtraOptions.Set(eo)
 			if err != nil {
 				err = errors.Wrapf(err, "setting extra option %s", eo)
 			}

@@ -23,6 +23,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
+	"k8s.io/minikube/pkg/minikube/cruntime"
 )

 // LogOptions are options to be passed to LogCommands
@@ -40,7 +41,8 @@ type Bootstrapper interface {
 	DeleteCluster(config.KubernetesConfig) error
 	WaitForCluster(config.ClusterConfig, time.Duration) error
 	JoinCluster(config.ClusterConfig, config.Node, string) error
-	UpdateNode(config.ClusterConfig)
+	UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error
+	GenerateToken(config.KubernetesConfig) (string, error)
 	// LogCommands returns a map of log type to a command which will display that log.
 	LogCommands(LogOptions) map[string]string
 	SetupCerts(config.KubernetesConfig, config.Node) error

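Taken together, the interface now separates per-node configuration (UpdateNode) from the join mechanics (GenerateToken, minted on the control plane, and JoinCluster, run on the new node). A rough sketch of how a caller composes the three, mirroring what node.Start does later in this diff; the function and variable names here are illustrative, not part of the commit:

    // cpBs is the control plane's bootstrapper, bs the new node's (sketch).
    func joinWorker(cpBs, bs bootstrapper.Bootstrapper, cc config.ClusterConfig, n config.Node, cr cruntime.Manager) error {
    	if err := bs.UpdateNode(cc, n, cr); err != nil {
    		return err // writes this node's kubeadm/kubelet configs
    	}
    	joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig)
    	if err != nil {
    		return err // the token must come from the control plane
    	}
    	return bs.JoinCluster(cc, n, joinCmd) // runs kubeadm join on the node
    }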
@@ -47,7 +47,7 @@ func AdjustResourceLimits(c command.Runner) error {
 	return nil
 }

-// ExistingConfig checks if there are config files from possible previous kubernets cluster
+// ExistingConfig checks if there are config files from possible previous kubernetes cluster
 func ExistingConfig(c command.Runner) error {
 	args := append([]string{"ls"}, expectedRemoteArtifacts...)
 	_, err := c.RunCmd(exec.Command("sudo", args...))
@@ -121,8 +121,10 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node)
 		return errors.Wrap(err, "encoding kubeconfig")
 	}

-	kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644")
-	copyableFiles = append(copyableFiles, kubeCfgFile)
+	if n.ControlPlane {
+		kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644")
+		copyableFiles = append(copyableFiles, kubeCfgFile)
+	}

 	for _, f := range copyableFiles {
 		if err := cmd.Copy(f); err != nil {

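With the n.ControlPlane guard, only control-plane nodes receive the generated kubeconfig asset; worker nodes still get the certificates already queued in copyableFiles. An illustration of the distinction (field values are hypothetical, not from this diff):

    cp := config.Node{Name: "minikube", ControlPlane: true} // kubeconfig copied to the guest
    worker := config.Node{Name: "minikube1", Worker: true}  // kubeconfig skipped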
@@ -381,6 +381,20 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
 	return nil
 }

+// GenerateToken creates a token and returns the appropriate kubeadm join command to run
+func (k *Bootstrapper) GenerateToken(k8s config.KubernetesConfig) (string, error) {
+	tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(k8s.KubernetesVersion)))
+	r, err := k.c.RunCmd(tokenCmd)
+	if err != nil {
+		return "", errors.Wrap(err, "generating bootstrap token")
+	}
+	joinCmd := r.Stdout.String()
+	joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(k8s.KubernetesVersion), 1)
+	joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))
+
+	return joinCmd, nil
+}
+
 // DeleteCluster removes the components that were started earlier
 func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
 	version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion)
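The string surgery above rewrites kubeadm's own printed join command so it can be replayed on a node that only has the version-pinned binary. Roughly, assuming InvokeKubeadm resolves to a path like /var/lib/minikube/binaries/v1.17.0/kubeadm (the path and token values below are illustrative):

    joinCmd := "kubeadm join 192.168.64.2:8443 --token <tok> --discovery-token-ca-cert-hash sha256:<hash>\n"
    joinCmd = strings.Replace(joinCmd, "kubeadm", "/var/lib/minikube/binaries/v1.17.0/kubeadm", 1)
    joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))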
@@ -405,7 +419,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er
 	return bootstrapper.SetupCerts(k.c, k8s, n)
 }

-// UpdateCluster updates the cluster
+// UpdateCluster updates the cluster.
 func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
 	images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion)
 	if err != nil {
@@ -423,14 +437,24 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
 		return errors.Wrap(err, "runtime")
 	}

-	// TODO: multiple nodes
-	kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, cfg.Nodes[0])
+	for _, n := range cfg.Nodes {
+		err := k.UpdateNode(cfg, n, r)
+		if err != nil {
+			return errors.Wrap(err, "updating node")
+		}
+	}
+
+	return nil
+}
+
+// UpdateNode updates a node.
+func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error {
+	kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, n)
 	if err != nil {
 		return errors.Wrap(err, "generating kubeadm cfg")
 	}

-	// TODO: multiple nodes
-	kubeletCfg, err := bsutil.NewKubeletConfig(cfg, cfg.Nodes[0], r)
+	kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r)
 	if err != nil {
 		return errors.Wrap(err, "generating kubelet config")
 	}

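UpdateCluster is now a thin fan-out: every entry in cfg.Nodes gets its own kubeadm and kubelet configs instead of everything being generated from cfg.Nodes[0]. A sketch of the per-node data it iterates over (values made up for illustration):

    cfg := config.ClusterConfig{
    	Name: "minikube",
    	Nodes: []config.Node{
    		{Name: "minikube", IP: "192.168.64.2", ControlPlane: true, Worker: true},
    		{Name: "minikube1", IP: "192.168.64.3", Worker: true}, // a joined worker
    	},
    }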
@@ -26,12 +26,9 @@ import (

 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
-	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"
 )

-var ExtraOptions config.ExtraOptionSlice
-
 // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI
 // INFO lvl logging is displayed due to the kubernetes api calling flag.Set("logtostderr", "true") in its init()
 // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34

@@ -17,23 +17,33 @@ limitations under the License.
 package cluster

 import (
 	"fmt"
 	"net"
 	"os"
 	"os/exec"
 	"strconv"
 	"strings"
 	"time"

 	"github.com/docker/machine/libmachine"
 	"github.com/docker/machine/libmachine/host"
 	"github.com/golang/glog"
 	"github.com/spf13/viper"
 	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/addons"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/cruntime"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/kubeconfig"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/logs"
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/minikube/proxy"
 	"k8s.io/minikube/pkg/util/retry"
 )

 const (
@@ -41,11 +51,13 @@ const (
 	waitUntilHealthy = "wait"
 	embedCerts = "embed-certs"
 	keepContext = "keep-context"
+	imageRepository = "image-repository"
+	containerRuntime = "container-runtime"
 )

 // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster
-func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (*kubeconfig.Settings, error) {
-	mRunner, preExists, machineAPI, host := StartMachine(&cc, &n)
+func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) {
+	_, preExists, machineAPI, host := StartMachine(&cc, &n)
 	defer machineAPI.Close()

 	// Must be written before bootstrap, otherwise health checks may flake due to stale IP
@@ -59,8 +71,17 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (

 	// pull images or restart cluster
 	out.T(out.Launch, "Launching Kubernetes ... ")
-	if err := bs.StartCluster(cc); err != nil {
-		exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))
+	err = bs.StartCluster(cc)
+	if err != nil {
+		/*config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: mRunner, ImageRepository: cc.KubernetesConfig.ImageRepository, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion}
+		cr, err := cruntime.New(config)
+		exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))*/
+		exit.WithError("Error starting cluster", err)
 	}

+	// enable addons, both old and new!
+	if existingAddons != nil {
+		addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList)
+	}
+
 	// Skip pre-existing, because we already waited for health
@@ -80,7 +101,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node)
 	if err != nil {
 		exit.WithError("Failed to get bootstrapper", err)
 	}
-	for _, eo := range ExtraOptions {
+	for _, eo := range config.ExtraOptions {
 		out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
 	}
 	// Loads cached images, generates config files, download binaries
@@ -122,3 +143,145 @@ func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clus
 	}
 	return kcs, nil
 }
+
+// StartMachine starts a VM
+func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
+	m, err := machine.NewAPIClient()
+	if err != nil {
+		exit.WithError("Failed to get machine client", err)
+	}
+	host, preExists = startHost(m, *cfg, *node)
+	runner, err = machine.CommandRunner(host)
+	if err != nil {
+		exit.WithError("Failed to get command runner", err)
+	}
+
+	ip := validateNetwork(host, runner)
+
+	// Bypass proxy for minikube's vm host ip
+	err = proxy.ExcludeIP(ip)
+	if err != nil {
+		out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
+	}
+
+	node.IP = ip
+	config.SaveNodeToProfile(cfg, node)
+
+	return runner, preExists, m, host
+}
+
+// startHost starts a new minikube host using a VM or None
+func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) {
+	exists, err := api.Exists(n.Name)
+	if err != nil {
+		exit.WithError("Failed to check if machine exists", err)
+	}
+
+	host, err := machine.StartHost(api, mc, n)
+	if err != nil {
+		exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
+	}
+	return host, exists
+}
+
+// validateNetwork tries to catch network problems as soon as possible
+func validateNetwork(h *host.Host, r command.Runner) string {
+	ip, err := h.Driver.GetIP()
+	if err != nil {
+		exit.WithError("Unable to get VM IP address", err)
+	}
+
+	optSeen := false
+	warnedOnce := false
+	for _, k := range proxy.EnvVars {
+		if v := os.Getenv(k); v != "" {
+			if !optSeen {
+				out.T(out.Internet, "Found network options:")
+				optSeen = true
+			}
+			out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v})
+			ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
+			k = strings.ToUpper(k) // for http_proxy & https_proxy
+			if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
+				out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
+				warnedOnce = true
+			}
+		}
+	}
+
+	if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
+		trySSH(h, ip)
+	}
+
+	tryLookup(r)
+	tryRegistry(r)
+	return ip
+}
+
+func trySSH(h *host.Host, ip string) {
+	if viper.GetBool("force") {
+		return
+	}
+
+	sshAddr := net.JoinHostPort(ip, "22")
+
+	dial := func() (err error) {
+		d := net.Dialer{Timeout: 3 * time.Second}
+		conn, err := d.Dial("tcp", sshAddr)
+		if err != nil {
+			out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err})
+			return err
+		}
+		_ = conn.Close()
+		return nil
+	}
+
+	if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
+		exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
+
+	This is likely due to one of two reasons:
+
+	- VPN or firewall interference
+	- {{.hypervisor}} network configuration issue
+
+	Suggested workarounds:
+
+	- Disable your local VPN or firewall software
+	- Configure your local VPN or firewall to allow access to {{.ip}}
+	- Restart or reinstall {{.hypervisor}}
+	- Use an alternative --vm-driver
+	- Use --force to override this connectivity check
+	`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
+	}
+}
+
+func tryLookup(r command.Runner) {
+	// DNS check
+	if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
+		glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
+		// will try with without query type for ISOs with different busybox versions.
+		if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
+			glog.Warningf("nslookup failed: %v", err)
+			out.WarningT("Node may be unable to resolve external DNS records")
+		}
+	}
+}
+func tryRegistry(r command.Runner) {
+	// Try an HTTPS connection to the image repository
+	proxy := os.Getenv("HTTPS_PROXY")
+	opts := []string{"-sS"}
+	if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
+		opts = append([]string{"-x", proxy}, opts...)
+	}
+
+	repo := viper.GetString(imageRepository)
+	if repo == "" {
+		repo = images.DefaultKubernetesRepo
+	}
+
+	opts = append(opts, fmt.Sprintf("https://%s/", repo))
+	if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
+		glog.Warningf("%s failed: %v", rr.Args, err)
+		out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
+	}
+}

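For reference, the registry probe in tryRegistry reduces, in the default case, to a plain `curl -sS https://k8s.gcr.io/` (assuming images.DefaultKubernetesRepo is the stock repository); with HTTPS_PROXY set to a non-local proxy, `-x $HTTPS_PROXY` is prepended.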
@@ -16,6 +16,17 @@ limitations under the License.

 package config

+var (
+	// DockerEnv contains the environment variables
+	DockerEnv []string
+	// DockerOpt contains the option parameters
+	DockerOpt []string
+	// ExtraOptions contains extra options (if any)
+	ExtraOptions ExtraOptionSlice
+	// AddonList contains the list of addons
+	AddonList []string
+)
+
 // AddNode adds a new node config to an existing cluster.
 func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error {
 	node := Node{

@@ -25,6 +25,7 @@ import (
 	"strings"

 	"github.com/golang/glog"
+	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/util/lock"
@@ -90,6 +91,23 @@ func CreateEmptyProfile(name string, miniHome ...string) error {
 	return SaveProfile(name, cfg, miniHome...)
 }

+// SaveNodeToProfile saves a node to a cluster
+func SaveNodeToProfile(cfg *ClusterConfig, node *Node) error {
+	update := false
+	for i, n := range cfg.Nodes {
+		if n.Name == node.Name {
+			cfg.Nodes[i] = *node
+			update = true
+			break
+		}
+	}
+
+	if !update {
+		cfg.Nodes = append(cfg.Nodes, *node)
+	}
+	return SaveProfile(viper.GetString(MachineProfile), cfg)
+}
+
 // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles/<profilename>/config.json
 func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error {
 	data, err := json.MarshalIndent(cfg, "", " ")

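SaveNodeToProfile upserts by node name and then persists the active profile. Hypothetical usage, matching the call site added in cluster.StartMachine above:

    n := config.Node{Name: "minikube1", Worker: true}
    n.IP = "192.168.64.3" // e.g. freshly reported by the driver
    if err := config.SaveNodeToProfile(&cfg, &n); err != nil {
    	return err
    }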
@@ -130,8 +130,10 @@ func TestStartHostExists(t *testing.T) {

 	mc := defaultClusterConfig
 	mc.Name = ih.Name
+
+	n := config.Node{Name: ih.Name}
 	// This should pass without calling Create because the host exists already.
-	h, err := StartHost(api, mc)
+	h, err := StartHost(api, mc, n)
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
@@ -162,8 +164,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
 	mc := defaultClusterConfig
 	mc.Name = h.Name

+	n := config.Node{Name: h.Name}
+
 	// This should pass with creating host, while machine does not exist.
-	h, err = StartHost(api, mc)
+	h, err = StartHost(api, mc, n)
 	if err != nil {
 		if err != ErrorMachineNotExist {
 			t.Fatalf("Error starting host: %v", err)
@@ -172,8 +176,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) {

 	mc.Name = h.Name

+	n.Name = h.Name
+
 	// Second call. This should pass without calling Create because the host exists already.
-	h, err = StartHost(api, mc)
+	h, err = StartHost(api, mc, n)
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
@@ -205,7 +211,10 @@ func TestStartStoppedHost(t *testing.T) {
 	provision.SetDetector(md)
 	mc := defaultClusterConfig
 	mc.Name = h.Name
-	h, err = StartHost(api, mc)
+
+	n := config.Node{Name: h.Name}
+
+	h, err = StartHost(api, mc, n)
 	if err != nil {
 		t.Fatal("Error starting host.")
 	}
@@ -233,7 +242,9 @@ func TestStartHost(t *testing.T) {
 	md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}}
 	provision.SetDetector(md)

-	h, err := StartHost(api, defaultClusterConfig)
+	n := config.Node{Name: viper.GetString("profile")}
+
+	h, err := StartHost(api, defaultClusterConfig, n)
 	if err != nil {
 		t.Fatal("Error starting host.")
 	}
@@ -261,14 +272,16 @@ func TestStartHostConfig(t *testing.T) {
 	md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}}
 	provision.SetDetector(md)

-	config := config.ClusterConfig{
+	cfg := config.ClusterConfig{
 		Driver: driver.Mock,
 		DockerEnv: []string{"FOO=BAR"},
 		DockerOpt: []string{"param=value"},
 		Downloader: MockDownloader{},
 	}

-	h, err := StartHost(api, config)
+	n := config.Node{Name: viper.GetString("profile")}
+
+	h, err := StartHost(api, cfg, n)
 	if err != nil {
 		t.Fatal("Error starting host.")
 	}

@@ -54,16 +54,16 @@ var (
 )

 // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes
-func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
+func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, error) {
 	out.T(out.Waiting, "Reconfiguring existing host ...")

 	start := time.Now()
-	glog.Infof("fixHost starting: %s", mc.Name)
+	glog.Infof("fixHost starting: %s", n.Name)
 	defer func() {
 		glog.Infof("fixHost completed within %s", time.Since(start))
 	}()

-	h, err := api.Load(mc.Name)
+	h, err := api.Load(n.Name)
 	if err != nil {
 		return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
 	}

@@ -61,9 +61,9 @@ var (
 )

 // StartHost starts a host VM.
-func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) {
+func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
 	// Prevent machine-driver boot races, as well as our own certificate race
-	releaser, err := acquireMachinesLock(cfg.Name)
+	releaser, err := acquireMachinesLock(n.Name)
 	if err != nil {
 		return nil, errors.Wrap(err, "boot lock")
 	}
@@ -73,16 +73,16 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error)
 		releaser.Release()
 	}()

-	exists, err := api.Exists(cfg.Name)
+	exists, err := api.Exists(n.Name)
 	if err != nil {
-		return nil, errors.Wrapf(err, "exists: %s", cfg.Name)
+		return nil, errors.Wrapf(err, "exists: %s", n.Name)
 	}
 	if !exists {
-		glog.Infof("Provisioning new machine with config: %+v", cfg)
+		glog.Infof("Provisioning new machine with config: %+v", n)
 		return createHost(api, cfg)
 	}
 	glog.Infoln("Skipping create...Using existing machine configuration")
-	return fixHost(api, cfg)
+	return fixHost(api, cfg, n)
 }

 func engineOptions(cfg config.ClusterConfig) *engine.Options {

@@ -35,17 +35,6 @@ import (
 	"k8s.io/minikube/pkg/util/lock"
 )

-var (
-	// DockerEnv contains the environment variables
-	DockerEnv []string
-	// DockerOpt contains the option parameters
-	DockerOpt []string
-	// ExtraOptions contains extra options (if any)
-	ExtraOptions config.ExtraOptionSlice
-	// AddonList contains the list of addons
-	AddonList []string
-)
-
 // configureRuntimes does what needs to happen to get a runtime going.
 func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
 	config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
@@ -69,66 +58,14 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config
 func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
 	version, _ := cr.Version()
 	out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version})
-	for _, v := range DockerOpt {
+	for _, v := range config.DockerOpt {
 		out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v})
 	}
-	for _, v := range DockerEnv {
+	for _, v := range config.DockerEnv {
 		out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v})
 	}
 }

-<<<<<<< HEAD
-=======
-// setupKubeAdm adds any requested files into the VM before Kubernetes is started
-func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper {
-	bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
-	if err != nil {
-		exit.WithError("Failed to get bootstrapper", err)
-	}
-	for _, eo := range ExtraOptions {
-		out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
-	}
-	// Loads cached images, generates config files, download binaries
-	if err := bs.UpdateCluster(cfg); err != nil {
-		exit.WithError("Failed to update cluster", err)
-	}
-	if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil {
-		exit.WithError("Failed to setup certs", err)
-	}
-	return bs
-}
-
-func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
-	addr, err := h.Driver.GetURL()
-	if err != nil {
-		exit.WithError("Failed to get driver URL", err)
-	}
-	if !driver.IsKIC(h.DriverName) {
-		addr = strings.Replace(addr, "tcp://", "https://", -1)
-		addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1)
-	}
-
-	if c.KubernetesConfig.APIServerName != constants.APIServerName {
-		addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1)
-	}
-	kcs := &kubeconfig.Settings{
-		ClusterName: clusterName,
-		ClusterServerAddress: addr,
-		ClientCertificate: localpath.MakeMiniPath("client.crt"),
-		ClientKey: localpath.MakeMiniPath("client.key"),
-		CertificateAuthority: localpath.MakeMiniPath("ca.crt"),
-		KeepContext: viper.GetBool(keepContext),
-		EmbedCerts: viper.GetBool(embedCerts),
-	}
-
-	kcs.SetPath(kubeconfig.PathFromEnv())
-	if err := kubeconfig.Update(kcs); err != nil {
-		return kcs, err
-	}
-	return kcs, nil
-}
-
->>>>>>> c4e2236e2b2966cb05fa11b3bdc8cf1d060a270c
 // configureMounts configures any requested filesystem mounts
 func configureMounts() {
 	if !viper.GetBool(createMount) {

@@ -1,185 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package node
-
-import (
-	"fmt"
-	"net"
-	"os"
-	"os/exec"
-	"strings"
-	"time"
-
-	"github.com/docker/machine/libmachine"
-	"github.com/docker/machine/libmachine/host"
-	"github.com/golang/glog"
-	"github.com/spf13/viper"
-	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
-	"k8s.io/minikube/pkg/minikube/command"
-	"k8s.io/minikube/pkg/minikube/config"
-	"k8s.io/minikube/pkg/minikube/driver"
-	"k8s.io/minikube/pkg/minikube/exit"
-	"k8s.io/minikube/pkg/minikube/machine"
-	"k8s.io/minikube/pkg/minikube/out"
-	"k8s.io/minikube/pkg/minikube/proxy"
-	"k8s.io/minikube/pkg/util/retry"
-)
-
-// StartMachine starts a VM
-func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
-	m, err := machine.NewAPIClient()
-	if err != nil {
-		exit.WithError("Failed to get machine client", err)
-	}
-	host, preExists = startHost(m, *cfg)
-	runner, err = machine.CommandRunner(host)
-	if err != nil {
-		exit.WithError("Failed to get command runner", err)
-	}
-
-	ip := validateNetwork(host, runner)
-
-	// Bypass proxy for minikube's vm host ip
-	err = proxy.ExcludeIP(ip)
-	if err != nil {
-		out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
-	}
-	// Save IP to configuration file for subsequent use
-	node.IP = ip
-
-	if err := Save(cfg, node); err != nil {
-		exit.WithError("Failed to save config", err)
-	}
-
-	return runner, preExists, m, host
-}
-
-// startHost starts a new minikube host using a VM or None
-func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) {
-	exists, err := api.Exists(mc.Name)
-	if err != nil {
-		exit.WithError("Failed to check if machine exists", err)
-	}
-
-	host, err := machine.StartHost(api, mc)
-	if err != nil {
-		exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
-	}
-	return host, exists
-}
-
-// validateNetwork tries to catch network problems as soon as possible
-func validateNetwork(h *host.Host, r command.Runner) string {
-	ip, err := h.Driver.GetIP()
-	if err != nil {
-		exit.WithError("Unable to get VM IP address", err)
-	}
-
-	optSeen := false
-	warnedOnce := false
-	for _, k := range proxy.EnvVars {
-		if v := os.Getenv(k); v != "" {
-			if !optSeen {
-				out.T(out.Internet, "Found network options:")
-				optSeen = true
-			}
-			out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v})
-			ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
-			k = strings.ToUpper(k) // for http_proxy & https_proxy
-			if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
-				out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
-				warnedOnce = true
-			}
-		}
-	}
-
-	if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
-		trySSH(h, ip)
-	}
-
-	tryLookup(r)
-	tryRegistry(r)
-	return ip
-}
-
-func trySSH(h *host.Host, ip string) {
-	if viper.GetBool("force") {
-		return
-	}
-
-	sshAddr := net.JoinHostPort(ip, "22")
-
-	dial := func() (err error) {
-		d := net.Dialer{Timeout: 3 * time.Second}
-		conn, err := d.Dial("tcp", sshAddr)
-		if err != nil {
-			out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err})
-			return err
-		}
-		_ = conn.Close()
-		return nil
-	}
-
-	if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
-		exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
-
-	This is likely due to one of two reasons:
-
-	- VPN or firewall interference
-	- {{.hypervisor}} network configuration issue
-
-	Suggested workarounds:
-
-	- Disable your local VPN or firewall software
-	- Configure your local VPN or firewall to allow access to {{.ip}}
-	- Restart or reinstall {{.hypervisor}}
-	- Use an alternative --vm-driver
-	- Use --force to override this connectivity check
-	`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
-	}
-}
-
-func tryLookup(r command.Runner) {
-	// DNS check
-	if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
-		glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
-		// will try with without query type for ISOs with different busybox versions.
-		if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
-			glog.Warningf("nslookup failed: %v", err)
-			out.WarningT("Node may be unable to resolve external DNS records")
-		}
-	}
-}
-func tryRegistry(r command.Runner) {
-	// Try an HTTPS connection to the image repository
-	proxy := os.Getenv("HTTPS_PROXY")
-	opts := []string{"-sS"}
-	if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
-		opts = append([]string{"-x", proxy}, opts...)
-	}
-
-	repo := viper.GetString(imageRepository)
-	if repo == "" {
-		repo = images.DefaultKubernetesRepo
-	}
-
-	opts = append(opts, fmt.Sprintf("https://%s/", repo))
-	if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
-		glog.Warningf("%s failed: %v", rr.Args, err)
-		out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
-	}
-}

@@ -25,7 +25,6 @@ import (
 )

 const (
-	imageRepository = "image-repository"
 	cacheImages = "cache-images"
 	waitUntilHealthy = "wait"
 	cacheImageConfigKey = "cache"
@@ -38,7 +37,7 @@ const (
 )

 // Add adds a new node config to an existing cluster.
-func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) {
+func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) error {
 	n := config.Node{
 		Name: name,
 		Worker: true,
@@ -62,11 +61,12 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool,
 	cc.Nodes = append(cc.Nodes, n)
 	err := config.SaveProfile(profileName, cc)
 	if err != nil {
-		return nil, err
+		return err
 	}

-	_, err = Start(*cc, n, false, nil)
-	return &n, err
+	err = Start(*cc, n, nil)
+
+	return err
 }

 // Delete stops and deletes the given node from the given cluster

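Since Add now starts the node itself and reports only an error, callers no longer juggle the returned *config.Node. The loop added to runStart earlier in this diff ignores that error; a stricter caller might do (sketch, with runStart's empty version/profile arguments):

    for i := 0; i < numNodes-1; i++ {
    	name := fmt.Sprintf("%s%d", n.Name, i+1)
    	if err := node.Add(&mc, name, false, true, "", ""); err != nil { // cp=false, worker=true
    		exit.WithError("adding node", err)
    	}
    }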
@@ -21,6 +21,7 @@ import (

 	"github.com/spf13/viper"
 	"golang.org/x/sync/errgroup"
+	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/addons"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/config"
@@ -32,25 +33,25 @@
 )

 // Start spins up a guest and starts the kubernetes node.
-func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddons map[string]bool) error {
+func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error {
 	// Now that the ISO is downloaded, pull images in the background while the VM boots.
 	var cacheGroup errgroup.Group
-	beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion)
+	beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion)

-	// Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot.
-	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
-	if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil {
-		exit.WithError("Failed to save config", err)
+	runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n)
+	defer mAPI.Close()
+
+	bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name)
+	if err != nil {
+		exit.WithError("Failed to get bootstrapper", err)
 	}

-	bs, err := cluster.Bootstrapper()
-
-	k8sVersion := mc.KubernetesConfig.KubernetesVersion
-	driverName := mc.Driver
+	k8sVersion := cc.KubernetesConfig.KubernetesVersion
+	driverName := cc.Driver
 	// exits here in case of --download-only option.
 	handleDownloadOnly(&cacheGroup, k8sVersion)
 	// configure the runtime (docker, containerd, crio)
-	cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig)
+	cr := configureRuntimes(runner, driverName, cc.KubernetesConfig)
 	showVersionInfo(k8sVersion, cr)
 	waitCacheRequiredImages(&cacheGroup)

@@ -58,7 +59,11 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon

 	// enable addons, both old and new!
 	if existingAddons != nil {
-		addons.Start(viper.GetString(config.MachineProfile), existingAddons, AddonList)
+		addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList)
 	}

+	if err := bs.UpdateNode(cc, n, cr); err != nil {
+		exit.WithError("Failed to update node", err)
+	}
+
 	if err := CacheAndLoadImagesInConfig(); err != nil {
@@ -66,18 +71,30 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon
 	}

 	// special ops for none , like change minikube directory.
-	if driverName == driver.None {
+	// multinode super doesn't work on the none driver
+	if driverName == driver.None && len(cc.Nodes) == 1 {
 		prepareNone()
 	}

 	// Skip pre-existing, because we already waited for health
 	if viper.GetBool(waitUntilHealthy) && !preExists {
-		if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil {
+		if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil {
 			exit.WithError("Wait failed", err)
 		}
 	}

-	return nil
+	bs.SetupCerts(cc.KubernetesConfig, n)
+
+	cp, err := config.PrimaryControlPlane(cc)
+	if err != nil {
+		exit.WithError("Getting primary control plane", err)
+	}
+	cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name)
+	if err != nil {
+		exit.WithError("Getting bootstrapper", err)
+	}
+	joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig)
+	return bs.JoinCluster(cc, n, joinCmd)
 }

 // prepareNone prepares the user and host for the joy of the "none" driver