dramatically simplify start code path

pull/6787/head
Sharif Elgamal 2020-03-18 15:04:56 -07:00
parent 66a6f4e906
commit a24aa5dff7
9 changed files with 409 additions and 650 deletions

View File

@ -19,10 +19,10 @@ package cmd
import (
"github.com/spf13/cobra"
cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
)
// cacheImageConfigKey is the config field name used to store which images we have previously cached
@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{
Short: "reload cached images.",
Long: "reloads images previously added using the 'cache add' subcommand",
Run: func(cmd *cobra.Command, args []string) {
err := cluster.CacheAndLoadImagesInConfig()
err := node.CacheAndLoadImagesInConfig()
if err != nil {
exit.WithError("Failed to reload cached images", err)
}
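For orientation: this and the next several hunks mostly re-home helpers from pkg/minikube/cluster into pkg/minikube/node; the call sites keep their shape. A minimal sketch of the resulting call site, with the surrounding command wiring assumed from this file:

	Run: func(cmd *cobra.Command, args []string) {
		// node.CacheAndLoadImagesInConfig is the relocated cluster.CacheAndLoadImagesInConfig.
		if err := node.CacheAndLoadImagesInConfig(); err != nil {
			exit.WithError("Failed to reload cached images", err)
		}
	},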

View File

@ -25,10 +25,10 @@ import (
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
)
@ -59,7 +59,7 @@ minikube kubectl -- get pods --namespace kube-system`,
version = cc.KubernetesConfig.KubernetesVersion
}
path, err := cluster.CacheKubectlBinary(version)
path, err := node.CacheKubectlBinary(version)
if err != nil {
out.ErrLn("Error caching kubectl: %v", err)
}
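The kubectl shim gets the same treatment. A sketch of how the cached binary is typically exec'd afterwards; the exec wiring below is an illustrative assumption, not part of this hunk:

	path, err := node.CacheKubectlBinary(version)
	if err != nil {
		out.ErrLn("Error caching kubectl: %v", err)
	}
	// Run the real kubectl with the user's arguments, inheriting stdio (assumed wiring).
	c := exec.Command(path, args...)
	c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := c.Run(); err != nil {
		os.Exit(1) // assumed: propagate kubectl's failure
	}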

View File

@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{
}
// Start it up baby
node.Start(*cc, *n, nil)
node.Start(*cc, *n, nil, false)
},
}
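node.Start now takes an apiServer flag and returns the kubeconfig settings; `minikube node start` passes false because the node rejoins an existing control plane rather than bootstrapping one. A sketch of the two call modes, assuming the signature introduced later in this diff:

	// Initial control plane (from `minikube start`): bootstraps kubeadm and
	// returns the generated *kubeconfig.Settings.
	kc := node.Start(cc, cp, existingAddons, true)

	// Secondary or restarted node: joins the existing cluster; the returned
	// settings are nil in this mode.
	node.Start(cc, n, nil, false)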

View File

@ -45,7 +45,6 @@ import (
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
@ -348,10 +347,7 @@ func runStart(cmd *cobra.Command, args []string) {
}
}
kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons)
if err != nil {
exit.WithError("Starting node", err)
}
kubeconfig := node.Start(cc, n, existingAddons, true)
numNodes := viper.GetInt(nodes)
if numNodes > 1 {

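With cluster.InitialSetup gone, runStart receives the kubeconfig settings directly from node.Start. For reference, a short consumer sketch; the fields match the Settings literal later in this diff, and the log line is illustrative only:

	kc := node.Start(cc, n, existingAddons, true)
	// kc.ClusterName                  - profile/cluster name
	// kc.ClusterServerAddress         - https://<host>:<port> of the apiserver
	// kc.ClientCertificate, kc.ClientKey, kc.CertificateAuthority - paths under the minikube dir
	// kc.KeepContext, kc.EmbedCerts   - user-facing kubeconfig behavior
	glog.Infof("apiserver reachable at %s", kc.ClusterServerAddress)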
View File

@ -212,6 +212,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
return errors.Wrap(err, "setting up node")
}
if err := k.applyNodeLabels(cfg); err != nil {
glog.Warningf("unable to apply node labels: %v", err)
}
if err := bsutil.AdjustResourceLimits(k.c); err != nil {
glog.Warningf("unable to adjust resource limits: %v", err)
}
@ -231,10 +235,6 @@ func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error {
}
}
if err := k.applyNodeLabels(cfg); err != nil {
glog.Warningf("unable to apply node labels: %v", err)
}
return nil
}
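Note the relocation above: applyNodeLabels moves out of SetupNode and into StartCluster. Presumably this is because SetupNode now also runs on joining worker nodes (see the new else branch of node.Start later in this diff), where no apiserver is up yet to accept a label update, while StartCluster runs exactly where the control plane has just come up.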

View File

@ -1,168 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"os"
"runtime"
"github.com/golang/glog"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
)
const (
cacheImages = "cache-images"
cacheImageConfigKey = "cache"
)
// BeginCacheKubernetesImages caches images required for kubernetes version in the background
func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) {
if download.PreloadExists(k8sVersion, cRuntime) {
glog.Info("Caching tarball of preloaded images")
err := download.Preload(k8sVersion, cRuntime)
if err == nil {
glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime)
return // don't cache individual images if preload is successful.
}
glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err)
}
if !viper.GetBool(cacheImages) {
return
}
g.Go(func() error {
return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
})
}
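BeginCacheKubernetesImages pairs with WaitCacheRequiredImages below: work is kicked off in the background and collected later. A self-contained sketch of that errgroup begin/wait pattern (a generic example, not minikube code):

	package main

	import (
		"fmt"
		"time"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		var g errgroup.Group

		// "Begin": start caching in the background while other setup proceeds.
		g.Go(func() error {
			time.Sleep(100 * time.Millisecond) // stand-in for a long download
			return nil
		})

		// ... boot the machine, configure the runtime, etc. ...

		// "Wait": block until the background work finishes; the first error wins.
		if err := g.Wait(); err != nil {
			fmt.Println("caching failed:", err)
		}
	}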
// HandleDownloadOnly caches the appropriate binaries and images
func HandleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) {
// If --download-only, complete the remaining downloads and exit.
if !viper.GetBool("download-only") {
return
}
if err := doCacheBinaries(k8sVersion); err != nil {
exit.WithError("Failed to cache binaries", err)
}
if _, err := CacheKubectlBinary(k8sVersion); err != nil {
exit.WithError("Failed to cache kubectl", err)
}
WaitCacheRequiredImages(cacheGroup)
WaitDownloadKicArtifacts(kicGroup)
if err := saveImagesToTarFromConfig(); err != nil {
exit.WithError("Failed to cache images to tar", err)
}
out.T(out.Check, "Download complete!")
os.Exit(0)
}
// CacheKubectlBinary caches the kubectl binary
func CacheKubectlBinary(k8sVersion string) (string, error) {
binary := "kubectl"
if runtime.GOOS == "windows" {
binary = "kubectl.exe"
}
return download.Binary(binary, k8sVersion, runtime.GOOS, runtime.GOARCH)
}
// doCacheBinaries caches Kubernetes binaries in the foreground
func doCacheBinaries(k8sVersion string) error {
return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
}
// BeginDownloadKicArtifacts downloads the kic image + preload tarball in the background
func BeginDownloadKicArtifacts(g *errgroup.Group) {
glog.Info("Beginning downloading kic artifacts")
g.Go(func() error {
glog.Infof("Downloading %s to local daemon", kic.BaseImage)
return image.WriteImageToDaemon(kic.BaseImage)
})
}
// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded.
func WaitDownloadKicArtifacts(g *errgroup.Group) {
if err := g.Wait(); err != nil {
glog.Errorln("Error downloading kic artifacts: ", err)
return
}
glog.Info("Successfully downloaded all kic artifacts")
}
// WaitCacheRequiredImages blocks until the required images are all cached.
func WaitCacheRequiredImages(g *errgroup.Group) {
if !viper.GetBool(cacheImages) {
return
}
if err := g.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}
}
// saveImagesToTarFromConfig saves the images specified in the config file as tars in the cache dir.
// Currently only used by the --download-only option.
func saveImagesToTarFromConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return image.SaveToDir(images, constants.ImageCacheDir)
}
// CacheAndLoadImagesInConfig loads the images currently listed in the config file;
// called by the 'start' and 'cache reload' commands.
func CacheAndLoadImagesInConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return machine.CacheAndLoadImages(images)
}
func imagesInConfigFile() ([]string, error) {
configFile, err := config.ReadConfig(localpath.ConfigFile())
if err != nil {
return nil, err
}
if values, ok := configFile[cacheImageConfigKey]; ok {
var images []string
for key := range values.(map[string]interface{}) {
images = append(images, key)
}
return images, nil
}
return []string{}, nil
}
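imagesInConfigFile above relies on the untyped shape of the config: the "cache" entry is a map whose keys are image names. A minimal standalone sketch of that extraction pattern:

	cfg := map[string]interface{}{
		"cache": map[string]interface{}{
			"busybox:latest": nil,
			"nginx:alpine":   nil,
		},
	}

	var images []string
	if values, ok := cfg["cache"]; ok {
		for key := range values.(map[string]interface{}) {
			images = append(images, key)
		}
	}
	// images now holds the cached image names (map iteration order is unspecified).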

View File

@ -1,422 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/blang/semver"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/logs"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/proxy"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
)
const (
waitTimeout = "wait-timeout"
waitUntilHealthy = "wait"
embedCerts = "embed-certs"
keepContext = "keep-context"
imageRepository = "image-repository"
containerRuntime = "container-runtime"
)
// InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster
func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) {
var kicGroup errgroup.Group
if driver.IsKIC(cc.Driver) {
BeginDownloadKicArtifacts(&kicGroup)
}
var cacheGroup errgroup.Group
if !driver.BareMetal(cc.Driver) {
BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
}
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil {
exit.WithError("Failed to save config", err)
}
HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
WaitDownloadKicArtifacts(&kicGroup)
mRunner, preExists, machineAPI, host := StartMachine(&cc, &n)
defer machineAPI.Close()
// wait for preloaded tarball to finish downloading before configuring runtimes
WaitCacheRequiredImages(&cacheGroup)
sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
if err != nil {
return nil, err
}
// configure the runtime (docker, containerd, crio)
cr := ConfigureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv)
// Must be written before bootstrap, otherwise health checks may flake due to stale IP
kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name)
if err != nil {
exit.WithError("Failed to setup kubeconfig", err)
}
// setup kubeadm (must come after setupKubeconfig)
bs := setupKubeAdm(machineAPI, cc, n)
// pull images or restart cluster
out.T(out.Launch, "Launching Kubernetes ... ")
err = bs.StartCluster(cc)
if err != nil {
exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))
}
//configureMounts()
if err := CacheAndLoadImagesInConfig(); err != nil {
out.T(out.FailureType, "Unable to load cached images from config file.")
}
// enable addons, both old and new!
if existingAddons != nil {
addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList)
}
// special ops for the none driver, like changing the minikube directory.
// multinode really doesn't work on the none driver
if cc.Driver == driver.None && len(cc.Nodes) == 1 {
prepareNone()
}
// Skip pre-existing, because we already waited for health
if viper.GetBool(waitUntilHealthy) && !preExists {
if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
exit.WithError("Wait failed", err)
}
}
return kubeconfig, nil
}
// ConfigureRuntimes does what needs to happen to get a runtime going.
func ConfigureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager {
co := cruntime.Config{
Type: viper.GetString(containerRuntime),
Runner: runner, ImageRepository: k8s.ImageRepository,
KubernetesVersion: kv,
}
cr, err := cruntime.New(co)
if err != nil {
exit.WithError("Failed runtime", err)
}
disableOthers := true
if driver.BareMetal(drvName) {
disableOthers = false
}
// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
if driver.IsVM(drvName) {
if err := cr.Preload(k8s); err != nil {
switch err.(type) {
case *cruntime.ErrISOFeature:
out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err})
default:
glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err)
}
if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
exit.WithError("Failed to cache images", err)
}
}
}
err = cr.Enable(disableOthers)
if err != nil {
exit.WithError("Failed to enable container runtime", err)
}
return cr
}
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper {
bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}
for _, eo := range config.ExtraOptions {
out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
}
// Loads cached images, generates config files, downloads binaries
if err := bs.UpdateCluster(cfg); err != nil {
exit.WithError("Failed to update cluster", err)
}
if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil {
exit.WithError("Failed to setup certs", err)
}
return bs
}
func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
addr, err := apiServerURL(*h, *cc, *n)
if err != nil {
exit.WithError("Failed to get API Server URL", err)
}
if cc.KubernetesConfig.APIServerName != constants.APIServerName {
addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1)
}
kcs := &kubeconfig.Settings{
ClusterName: clusterName,
ClusterServerAddress: addr,
ClientCertificate: localpath.MakeMiniPath("client.crt"),
ClientKey: localpath.MakeMiniPath("client.key"),
CertificateAuthority: localpath.MakeMiniPath("ca.crt"),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
}
kcs.SetPath(kubeconfig.PathFromEnv())
if err := kubeconfig.Update(kcs); err != nil {
return kcs, err
}
return kcs, nil
}
func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) {
hostname := ""
port := n.Port
var err error
if driver.IsKIC(h.DriverName) {
// for kic drivers we use 127.0.0.1 instead of the node IP,
// because of Docker-on-macOS limitations in reaching the container's IP.
hostname = oci.DefaultBindIPV4
port, err = oci.ForwardedPort(h.DriverName, h.Name, port)
if err != nil {
return "", errors.Wrap(err, "host port binding")
}
} else {
hostname, err = h.Driver.GetIP()
if err != nil {
return "", errors.Wrap(err, "get ip")
}
}
if cc.KubernetesConfig.APIServerName != constants.APIServerName {
hostname = cc.KubernetesConfig.APIServerName
}
return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil
}
// StartMachine starts a VM
func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
}
host, preExists = startHost(m, *cfg, *node)
runner, err = machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
}
ip := validateNetwork(host, runner)
// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
if err != nil {
out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
}
// Save IP to config file for subsequent use
node.IP = ip
err = config.SaveNode(cfg, node)
if err != nil {
exit.WithError("saving node", err)
}
return runner, preExists, m, host
}
// startHost starts a new minikube host using a VM or None
func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) {
host, exists, err := machine.StartHost(api, mc, n)
if err != nil {
exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
}
return host, exists
}
// validateNetwork tries to catch network problems as soon as possible
func validateNetwork(h *host.Host, r command.Runner) string {
ip, err := h.Driver.GetIP()
if err != nil {
exit.WithError("Unable to get VM IP address", err)
}
optSeen := false
warnedOnce := false
for _, k := range proxy.EnvVars {
if v := os.Getenv(k); v != "" {
if !optSeen {
out.T(out.Internet, "Found network options:")
optSeen = true
}
out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v})
ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
k = strings.ToUpper(k) // for http_proxy & https_proxy
if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
warnedOnce = true
}
}
}
if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
trySSH(h, ip)
}
tryLookup(r)
tryRegistry(r)
return ip
}
func trySSH(h *host.Host, ip string) {
if viper.GetBool("force") {
return
}
sshAddr := net.JoinHostPort(ip, "22")
dial := func() (err error) {
d := net.Dialer{Timeout: 3 * time.Second}
conn, err := d.Dial("tcp", sshAddr)
if err != nil {
out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err})
return err
}
_ = conn.Close()
return nil
}
if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
This is likely due to one of two reasons:
- VPN or firewall interference
- {{.hypervisor}} network configuration issue
Suggested workarounds:
- Disable your local VPN or firewall software
- Configure your local VPN or firewall to allow access to {{.ip}}
- Restart or reinstall {{.hypervisor}}
- Use an alternative --vm-driver
- Use --force to override this connectivity check
`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
}
}
func tryLookup(r command.Runner) {
// DNS check
if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
// will try with without query type for ISOs with different busybox versions.
if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
glog.Warningf("nslookup failed: %v", err)
out.WarningT("Node may be unable to resolve external DNS records")
}
}
}
func tryRegistry(r command.Runner) {
// Try an HTTPS connection to the image repository
proxy := os.Getenv("HTTPS_PROXY")
opts := []string{"-sS"}
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
opts = append([]string{"-x", proxy}, opts...)
}
repo := viper.GetString(imageRepository)
if repo == "" {
repo = images.DefaultKubernetesRepo
}
opts = append(opts, fmt.Sprintf("https://%s/", repo))
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
}
}
// prepareNone prepares the user and host for the joy of the "none" driver
func prepareNone() {
out.T(out.StartingNone, "Configuring local host environment ...")
if viper.GetBool(config.WantNoneDriverWarning) {
out.T(out.Empty, "")
out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.")
out.WarningT("For more information, see:")
out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
out.T(out.Empty, "")
}
if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" {
home := os.Getenv("HOME")
out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home})
out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:")
out.T(out.Empty, "")
out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home})
out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube")
out.T(out.Empty, "")
out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true")
}
if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil {
exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err})
}
}

View File

@ -39,7 +39,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
return err
}
Start(*cc, n, nil)
Start(*cc, n, nil, false)
return nil
}

View File

@ -17,94 +17,447 @@ limitations under the License.
package node
import (
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/blang/semver"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/logs"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/proxy"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
)
const (
waitTimeout = "wait-timeout"
waitUntilHealthy = "wait"
embedCerts = "embed-certs"
keepContext = "keep-context"
imageRepository = "image-repository"
containerRuntime = "container-runtime"
)
The removed implementation of Start, which delegated to helpers in pkg/minikube/cluster:

// Start spins up a guest and starts the Kubernetes node.
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) {
	// Now that the ISO is downloaded, pull images in the background while the VM boots.
	var cacheGroup errgroup.Group
	if !driver.BareMetal(cc.Driver) {
		cluster.BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
	}

	var kicGroup errgroup.Group
	if driver.IsKIC(cc.Driver) {
		cluster.BeginDownloadKicArtifacts(&kicGroup)
	}

	runner, _, mAPI, _ := cluster.StartMachine(&cc, &n)
	defer mAPI.Close()

	bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
	if err != nil {
		exit.WithError("Failed to get bootstrapper", err)
	}

	k8sVersion := n.KubernetesVersion
	driverName := cc.Driver

	// exits here in case of --download-only option.
	cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion)
	cluster.WaitDownloadKicArtifacts(&kicGroup)

	// wait for preloaded tarball to finish downloading before configuring runtimes
	cluster.WaitCacheRequiredImages(&cacheGroup)

	sv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
	if err != nil {
		exit.WithError("Failed to parse kubernetes version", err)
	}

	// configure the runtime (docker, containerd, crio)
	cr := cluster.ConfigureRuntimes(runner, driverName, cc.KubernetesConfig, sv)
	showVersionInfo(k8sVersion, cr)

	configureMounts()

	if err := CacheAndLoadImagesInConfig(); err != nil {
		out.T(out.FailureType, "Unable to load cached images from config file.")
	}

	// enable addons, both old and new!
	if existingAddons != nil {
		addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList)
	}

	if err := bs.UpdateNode(cc, n, cr); err != nil {
		exit.WithError("Failed to update node", err)
	}

	if err := cluster.CacheAndLoadImagesInConfig(); err != nil {
		exit.WithError("Unable to load cached images from config file.", err)
	}

	if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
		exit.WithError("setting up certs", err)
	}

	if err = bs.SetupNode(cc); err != nil {
		exit.WithError("Failed to setup node", err)
	}

	cp, err := config.PrimaryControlPlane(&cc)
	if err != nil {
		exit.WithError("Getting primary control plane", err)
	}
	cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
	if err != nil {
		exit.WithError("Getting bootstrapper", err)
	}

	joinCmd, err := cpBs.GenerateToken(cc)
	if err != nil {
		exit.WithError("generating join token", err)
	}

	if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
		exit.WithError("joining cluster", err)
	}
}

The replacement; the cluster helpers are folded into this package, and the new apiServer flag chooses between bootstrapping a fresh control plane and joining an existing one:

// Start spins up a guest and starts the Kubernetes node.
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings {
	var kicGroup errgroup.Group
	if driver.IsKIC(cc.Driver) {
		beginDownloadKicArtifacts(&kicGroup)
	}

	var cacheGroup errgroup.Group
	if !driver.BareMetal(cc.Driver) {
		beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
	}

	// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
	if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil {
		exit.WithError("Failed to save config", err)
	}

	handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
	waitDownloadKicArtifacts(&kicGroup)

	mRunner, preExists, machineAPI, host := startMachine(&cc, &n)
	defer machineAPI.Close()

	// wait for preloaded tarball to finish downloading before configuring runtimes
	waitCacheRequiredImages(&cacheGroup)

	sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
	if err != nil {
		exit.WithError("Failed to parse kubernetes version", err)
	}

	// configure the runtime (docker, containerd, crio)
	cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv)
	showVersionInfo(n.KubernetesVersion, cr)

	var bs bootstrapper.Bootstrapper
	var kubeconfig *kubeconfig.Settings
	if apiServer {
		// Must be written before bootstrap, otherwise health checks may flake due to stale IP
		kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name)
		if err != nil {
			exit.WithError("Failed to setup kubeconfig", err)
		}

		// setup kubeadm (must come after setupKubeconfig)
		bs = setupKubeAdm(machineAPI, cc, n)

		err = bs.StartCluster(cc)
		if err != nil {
			exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))
		}
	} else {
		bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
		if err != nil {
			exit.WithError("Failed to get bootstrapper", err)
		}

		if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
			exit.WithError("setting up certs", err)
		}

		if err = bs.SetupNode(cc); err != nil {
			exit.WithError("Failed to setup node", err)
		}
	}

	configureMounts()

	if err := CacheAndLoadImagesInConfig(); err != nil {
		out.T(out.FailureType, "Unable to load cached images from config file.")
	}

	// enable addons, both old and new!
	if existingAddons != nil {
		addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList)
	}

	if apiServer {
		// special ops for the none driver, like changing the minikube directory.
		// multinode really doesn't work on the none driver
		if cc.Driver == driver.None && len(cc.Nodes) == 1 {
			prepareNone()
		}

		// Skip pre-existing, because we already waited for health
		if viper.GetBool(waitUntilHealthy) && !preExists {
			if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
				exit.WithError("Wait failed", err)
			}
		}
	} else {
		if err := bs.UpdateNode(cc, n, cr); err != nil {
			exit.WithError("Updating node", err)
		}

		cp, err := config.PrimaryControlPlane(&cc)
		if err != nil {
			exit.WithError("Getting primary control plane", err)
		}
		cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
		if err != nil {
			exit.WithError("Getting bootstrapper", err)
		}

		joinCmd, err := cpBs.GenerateToken(cc)
		if err != nil {
			exit.WithError("generating join token", err)
		}

		if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
			exit.WithError("joining cluster", err)
		}
	}

	return kubeconfig
}

// configureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager {
	co := cruntime.Config{
		Type:   viper.GetString(containerRuntime),
		Runner: runner, ImageRepository: k8s.ImageRepository,
		KubernetesVersion: kv,
	}
	cr, err := cruntime.New(co)
	if err != nil {
		exit.WithError("Failed runtime", err)
	}

	disableOthers := true
	if driver.BareMetal(drvName) {
		disableOthers = false
	}

	// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
	if driver.IsVM(drvName) {
		if err := cr.Preload(k8s); err != nil {
			switch err.(type) {
			case *cruntime.ErrISOFeature:
				out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err})
			default:
				glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err)
			}

			if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
				exit.WithError("Failed to cache images", err)
			}
		}
	}

	err = cr.Enable(disableOthers)
	if err != nil {
		exit.WithError("Failed to enable container runtime", err)
	}
	return cr
}
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper {
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}
for _, eo := range config.ExtraOptions {
out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
}
// Loads cached images, generates config files, downloads binaries
if err := bs.UpdateCluster(cfg); err != nil {
exit.WithError("Failed to update cluster", err)
}
if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil {
exit.WithError("Failed to setup certs", err)
}
return bs
}
func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
addr, err := apiServerURL(*h, *cc, *n)
if err != nil {
exit.WithError("Failed to get API Server URL", err)
}
if cc.KubernetesConfig.APIServerName != constants.APIServerName {
addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1)
}
kcs := &kubeconfig.Settings{
ClusterName: clusterName,
ClusterServerAddress: addr,
ClientCertificate: localpath.MakeMiniPath("client.crt"),
ClientKey: localpath.MakeMiniPath("client.key"),
CertificateAuthority: localpath.MakeMiniPath("ca.crt"),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
}
kcs.SetPath(kubeconfig.PathFromEnv())
if err := kubeconfig.Update(kcs); err != nil {
return kcs, err
}
return kcs, nil
}
func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) {
hostname := ""
port := n.Port
var err error
if driver.IsKIC(h.DriverName) {
// for kic drivers we use 127.0.0.1 instead of the node IP,
// because of Docker-on-macOS limitations in reaching the container's IP.
hostname = oci.DefaultBindIPV4
port, err = oci.ForwardedPort(h.DriverName, h.Name, port)
if err != nil {
return "", errors.Wrap(err, "host port binding")
}
} else {
hostname, err = h.Driver.GetIP()
if err != nil {
return "", errors.Wrap(err, "get ip")
}
}
if cc.KubernetesConfig.APIServerName != constants.APIServerName {
hostname = cc.KubernetesConfig.APIServerName
}
return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil
}
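A worked example of apiServerURL with assumed values: a KIC driver yields the loopback address plus the forwarded host port (e.g. https://127.0.0.1:32776), while a VM driver yields the machine IP and node port (e.g. https://192.168.39.10:8443). The closing fmt.Sprintf receives a pre-concatenated string with no format verbs, so plain concatenation is equivalent:

	addr := "https://" + net.JoinHostPort("127.0.0.1", strconv.Itoa(32776))
	// addr == "https://127.0.0.1:32776"; JoinHostPort also brackets IPv6 hosts correctly.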
// startMachine starts a VM
func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
}
host, preExists = startHost(m, *cfg, *node)
runner, err = machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
}
ip := validateNetwork(host, runner)
// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
if err != nil {
out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
}
// Save IP to config file for subsequent use
node.IP = ip
err = config.SaveNode(cfg, node)
if err != nil {
exit.WithError("saving node", err)
}
return runner, preExists, m, host
}
// startHost starts a new minikube host using a VM or None
func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) {
host, exists, err := machine.StartHost(api, mc, n)
if err != nil {
exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
}
return host, exists
}
// validateNetwork tries to catch network problems as soon as possible
func validateNetwork(h *host.Host, r command.Runner) string {
ip, err := h.Driver.GetIP()
if err != nil {
exit.WithError("Unable to get VM IP address", err)
}
optSeen := false
warnedOnce := false
for _, k := range proxy.EnvVars {
if v := os.Getenv(k); v != "" {
if !optSeen {
out.T(out.Internet, "Found network options:")
optSeen = true
}
out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v})
ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
k = strings.ToUpper(k) // for http_proxy & https_proxy
if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
warnedOnce = true
}
}
}
if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
trySSH(h, ip)
}
tryLookup(r)
tryRegistry(r)
return ip
}
func trySSH(h *host.Host, ip string) {
if viper.GetBool("force") {
return
}
sshAddr := net.JoinHostPort(ip, "22")
dial := func() (err error) {
d := net.Dialer{Timeout: 3 * time.Second}
conn, err := d.Dial("tcp", sshAddr)
if err != nil {
out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err})
return err
}
_ = conn.Close()
return nil
}
if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
This is likely due to one of two reasons:
- VPN or firewall interference
- {{.hypervisor}} network configuration issue
Suggested workarounds:
- Disable your local VPN or firewall software
- Configure your local VPN or firewall to allow access to {{.ip}}
- Restart or reinstall {{.hypervisor}}
- Use an alternative --vm-driver
- Use --force to override this connectivity check
`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
}
}
func tryLookup(r command.Runner) {
// DNS check
if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
// will try with without query type for ISOs with different busybox versions.
if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
glog.Warningf("nslookup failed: %v", err)
out.WarningT("Node may be unable to resolve external DNS records")
}
}
}
func tryRegistry(r command.Runner) {
// Try an HTTPS connection to the image repository
proxy := os.Getenv("HTTPS_PROXY")
opts := []string{"-sS"}
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
opts = append([]string{"-x", proxy}, opts...)
}
repo := viper.GetString(imageRepository)
if repo == "" {
repo = images.DefaultKubernetesRepo
}
opts = append(opts, fmt.Sprintf("https://%s/", repo))
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
}
}
// prepareNone prepares the user and host for the joy of the "none" driver
func prepareNone() {
out.T(out.StartingNone, "Configuring local host environment ...")
if viper.GetBool(config.WantNoneDriverWarning) {
out.T(out.Empty, "")
out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.")
out.WarningT("For more information, see:")
out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
out.T(out.Empty, "")
}
if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" {
home := os.Getenv("HOME")
out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home})
out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:")
out.T(out.Empty, "")
out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home})
out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube")
out.T(out.Empty, "")
out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true")
}
if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil {
exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err})
}
}
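Taken together, the commit leaves one code path. A hedged sketch of the overall flow as runStart and node.Add now drive it; numNodes mirrors the runStart hunk near the top, and the Node literal's fields are illustrative assumptions:

	// Control plane first: boots the machine, bootstraps kubeadm, writes kubeconfig.
	kc := node.Start(cc, cp, existingAddons, true)
	glog.Infof("cluster up at %s", kc.ClusterServerAddress)

	// Any additional nodes then go through Add -> Start(..., apiServer=false),
	// which joins them to the existing control plane.
	for i := 1; i < numNodes; i++ {
		n := config.Node{Name: fmt.Sprintf("m%02d", i+1), Worker: true} // illustrative fields
		if err := node.Add(&cc, n); err != nil {
			exit.WithError("adding node", err)
		}
	}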