From 8536fb76419d04bd2c75b47f2599792b6cef28a0 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 19 Feb 2020 12:53:59 -0800 Subject: [PATCH 01/42] changes and things --- cmd/minikube/cmd/config/set_test.go | 2 +- cmd/minikube/cmd/logs.go | 2 +- cmd/minikube/cmd/start.go | 14 +++---- cmd/minikube/cmd/start_test.go | 6 +-- pkg/addons/addons.go | 2 +- pkg/addons/addons_test.go | 2 +- pkg/addons/config.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 7 ++-- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 +- .../bootstrapper/bsutil/kubeadm_test.go | 24 ++++++------ pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- .../bootstrapper/bsutil/kubelet_test.go | 12 +++--- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 37 +++++++++++++++---- pkg/minikube/cluster/cluster.go | 4 +- pkg/minikube/cluster/iso.go | 2 +- pkg/minikube/config/config.go | 14 +++---- pkg/minikube/config/node.go | 2 +- pkg/minikube/config/profile.go | 6 +-- pkg/minikube/config/profile_test.go | 8 ++-- .../.minikube2/profiles/p1/config.json | 2 +- .../.minikube2/profiles/p2/config.json | 2 +- .../profiles/p5_partial_config/config.json | 2 +- .../.minikube/profiles/p1/config.json | 2 +- .../p4_partial_profile_config/config.json | 2 +- .../p5_missing_machine_config/config.json | 2 +- .../p6_empty_machine_config/config.json | 2 +- .../p7_invalid_machine_config/config.json | 2 +- .../p8_partial_machine_config/config.json | 2 +- .../.minikube/profiles/p1/config.json | 2 +- .../p4_partial_profile_config/config.json | 2 +- .../p5_missing_machine_config/config.json | 2 +- .../p6_empty_machine_config/config.json | 2 +- .../p7_invalid_machine_config/config.json | 2 +- .../p8_partial_machine_config/config.json | 2 +- pkg/minikube/config/types.go | 6 +-- pkg/minikube/machine/cache_images.go | 2 +- pkg/minikube/machine/cluster_test.go | 34 ++++++++--------- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/machine/start.go | 10 ++--- pkg/minikube/node/config.go | 4 +- pkg/minikube/node/machine.go | 4 +- 
pkg/minikube/node/node.go | 13 ++++--- pkg/minikube/node/start.go | 2 +- pkg/minikube/registry/drvs/docker/docker.go | 2 +- .../registry/drvs/hyperkit/hyperkit.go | 2 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- pkg/minikube/registry/drvs/none/none.go | 2 +- .../registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- .../registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../drvs/vmwarefusion/vmwarefusion.go | 2 +- pkg/minikube/registry/registry.go | 2 +- pkg/minikube/tunnel/cluster_inspector.go | 4 +- pkg/minikube/tunnel/cluster_inspector_test.go | 6 +-- pkg/minikube/tunnel/test_doubles.go | 6 +-- pkg/minikube/tunnel/tunnel_test.go | 4 +- .../en/docs/Contributing/drivers.en.md | 2 +- 59 files changed, 162 insertions(+), 139 deletions(-) diff --git a/cmd/minikube/cmd/config/set_test.go b/cmd/minikube/cmd/config/set_test.go index f236445ffc..acd4db256b 100644 --- a/cmd/minikube/cmd/config/set_test.go +++ b/cmd/minikube/cmd/config/set_test.go @@ -80,7 +80,7 @@ func createTestProfile(t *testing.T) { if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil { t.Fatalf("error creating temporary directory") } - if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil { + if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil { t.Fatalf("error creating temporary profile config: %v", err) } } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 4796ef54b4..934ca9c1b0 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -67,7 +67,7 @@ var logsCmd = &cobra.Command{ if err != nil { exit.WithError("command runner", err) } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper)) + bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile)) if 
err != nil { exit.WithError("Error getting cluster bootstrapper", err) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index de5002a4c9..72d83bd549 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -351,7 +351,7 @@ func updateDriver(driverName string) { } } -func cacheISO(cfg *config.MachineConfig, driverName string) { +func cacheISO(cfg *config.ClusterConfig, driverName string) { if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { if err := cluster.CacheISO(*cfg); err != nil { exit.WithError("Failed to cache ISO", err) @@ -429,7 +429,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st return nil } -func selectDriver(existing *config.MachineConfig) registry.DriverState { +func selectDriver(existing *config.ClusterConfig) registry.DriverState { // Technically unrelated, but important to perform before detection driver.SetLibvirtURI(viper.GetString(kvmQemuURI)) @@ -464,7 +464,7 @@ func selectDriver(existing *config.MachineConfig) registry.DriverState { } // validateDriver validates that the selected driver appears sane, exits if not -func validateDriver(ds registry.DriverState, existing *config.MachineConfig) { +func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name glog.Infof("validating driver %q against %+v", name, existing) if !driver.Supported(name) { @@ -717,10 +717,10 @@ func validateRegistryMirror() { } // generateCfgFromFlags generates config.Config based on flags and supplied arguments -func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) { +func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) { r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) if err != nil { - return config.MachineConfig{}, config.Node{}, err + return config.ClusterConfig{}, config.Node{}, 
err } // Pick good default values for --network-plugin and --enable-default-cni based on runtime. @@ -775,7 +775,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) Worker: true, } - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ Name: viper.GetString(config.MachineProfile), KeepContext: viper.GetBool(keepContext), EmbedCerts: viper.GetBool(embedCerts), @@ -881,7 +881,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { } // getKubernetesVersion ensures that the requested version is reasonable -func getKubernetesVersion(old *config.MachineConfig) string { +func getKubernetesVersion(old *config.ClusterConfig) string { paramVersion := viper.GetString(kubernetesVersion) if paramVersion == "" { // if the user did not specify any version then ... diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index 1003292da2..b664e3486b 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -31,7 +31,7 @@ func TestGetKuberneterVersion(t *testing.T) { description string expectedVersion string paramVersion string - cfg *cfg.MachineConfig + cfg *cfg.ClusterConfig }{ { description: "kubernetes-version not given, no config", @@ -42,7 +42,7 @@ func TestGetKuberneterVersion(t *testing.T) { description: "kubernetes-version not given, config available", expectedVersion: "v1.15.0", paramVersion: "", - cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, + cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, { description: "kubernetes-version given, no config", @@ -53,7 +53,7 @@ func TestGetKuberneterVersion(t *testing.T) { description: "kubernetes-version given, config available", expectedVersion: "v1.16.0", paramVersion: "v1.16.0", - cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, + cfg: &cfg.ClusterConfig{KubernetesConfig: 
cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, } diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index c1042e45be..e1ca836c37 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -88,7 +88,7 @@ func run(name, value, profile string, fns []setFn) error { } // SetBool sets a bool value -func SetBool(m *config.MachineConfig, name string, val string) error { +func SetBool(m *config.ClusterConfig, name string, val string) error { b, err := strconv.ParseBool(val) if err != nil { return err diff --git a/pkg/addons/addons_test.go b/pkg/addons/addons_test.go index 14449917a2..559f5729e7 100644 --- a/pkg/addons/addons_test.go +++ b/pkg/addons/addons_test.go @@ -44,7 +44,7 @@ func createTestProfile(t *testing.T) string { if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil { t.Fatalf("error creating temporary directory") } - if err := config.DefaultLoader.WriteConfigToFile(name, &config.MachineConfig{}); err != nil { + if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil { t.Fatalf("error creating temporary profile config: %v", err) } return name diff --git a/pkg/addons/config.go b/pkg/addons/config.go index 059ddf7929..46c713d69f 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -23,7 +23,7 @@ type setFn func(string, string, string) error // Addon represents an addon type Addon struct { name string - set func(*config.MachineConfig, string, string) error + set func(*config.ClusterConfig, string, string) error validations []setFn callbacks []setFn } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 669a38fd57..eba5167179 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -35,10 +35,11 @@ type LogOptions struct { // Bootstrapper contains all the methods needed to bootstrap a kubernetes cluster type Bootstrapper interface { - StartCluster(config.MachineConfig) error - 
UpdateCluster(config.MachineConfig) error + StartCluster(config.ClusterConfig) error + UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.MachineConfig, time.Duration) error + WaitForCluster(config.ClusterConfig, time.Duration) error + JoinCluster(config.ClusterConfig, config.Node, string) error // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 602c9c17de..e5d926ee12 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -36,7 +36,7 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.MachineConfig, r cruntime.Manager) ([]byte, error) { +func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager) ([]byte, error) { k8s := mc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 6bf8f27951..922bf4b8b3 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -106,9 +106,9 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { name string runtime string shouldErr bool - cfg config.MachineConfig + cfg config.ClusterConfig }{ - {"dns", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -172,17 +172,17 @@ func TestGenerateKubeadmYAML(t *testing.T) { name string runtime string 
shouldErr bool - cfg config.MachineConfig + cfg config.ClusterConfig }{ - {"default", "docker", false, config.MachineConfig{}}, - {"containerd", "containerd", false, config.MachineConfig{}}, - {"crio", "crio", false, config.MachineConfig{}}, - {"options", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.MachineConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{}}, + {"containerd", "containerd", false, config.ClusterConfig{}}, + {"crio", "crio", false, config.ClusterConfig{}}, + {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, 
config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index f080a7eba7..a426cc409b 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -30,7 +30,7 @@ import ( // NewKubeletConfig generates a new systemd unit containing a configured kubelet // based on the options present in the KubernetesConfig. -func NewKubeletConfig(mc config.MachineConfig, nc config.Node, r cruntime.Manager) ([]byte, error) { +func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) { k8s := mc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 56dc23168b..b0908e870f 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -30,13 +30,13 @@ import ( func TestGenerateKubeletConfig(t *testing.T) { tests := []struct { description string - cfg config.MachineConfig + cfg config.ClusterConfig expected string shouldErr bool }{ { description: "old docker", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -61,7 +61,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- }, { description: "newest cri runtime", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: 
"cri-o", @@ -86,7 +86,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo }, { description: "default containerd runtime", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -111,7 +111,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo }, { description: "default containerd runtime with IP override", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -143,7 +143,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo }, { description: "docker with custom image repository", - cfg: config.MachineConfig{ + cfg: config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "docker", diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 9eb428d8fa..a136a9f8de 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -35,7 +35,6 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" @@ -64,8 +63,7 @@ type Bootstrapper struct { } // NewBootstrapper creates a new kubeadm.Bootstrapper -func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) { - name := viper.GetString(config.MachineProfile) +func NewBootstrapper(api libmachine.API, name string) (*Bootstrapper, error) { h, err := api.Load(name) if err != nil { return nil, errors.Wrap(err, "getting api client") @@ -149,7 +147,7 @@ func (k *Bootstrapper) 
createCompatSymlinks() error { } // StartCluster starts the cluster -func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { err := bsutil.ExistingConfig(k.c) if err == nil { // if there is an existing cluster don't reconfigure it return k.restartCluster(cfg) @@ -262,7 +260,7 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error } // WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.MachineConfig, timeout time.Duration) error { +func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") cp, err := config.PrimaryControlPlane(cfg) @@ -295,7 +293,7 @@ func (k *Bootstrapper) WaitForCluster(cfg config.MachineConfig, timeout time.Dur } // restartCluster restarts the Kubernetes cluster configured by kubeadm -func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("restartCluster start") start := time.Now() @@ -371,6 +369,29 @@ func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error { return nil } +// JoinCluster adds a node to an existing cluster +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { + start := time.Now() + glog.Infof("JoinCluster: %+v", cc) + defer func() { + glog.Infof("JoinCluster complete in %s", time.Since(start)) + }() + + // Join the master by specifying its token + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) + fmt.Println(joinCmd) + out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) + if err != nil { + return errors.Wrapf(err, "cmd failed: %s\n%s\n", joinCmd, out) + } + + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo 
systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { + return errors.Wrap(err, "starting kubelet") + } + + return nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -396,7 +417,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er } // UpdateCluster updates the cluster -func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error { +func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "kubeadm images") @@ -469,7 +490,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error { } // applyKicOverlay applies the CNI plugin needed to make kic work -func (k *Bootstrapper) applyKicOverlay(cfg config.MachineConfig) error { +func (k *Bootstrapper) applyKicOverlay(cfg config.ClusterConfig) error { cmd := exec.Command("sudo", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), "create", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", "-") diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 481bbe3cc3..a2b9e06613 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -42,12 +42,12 @@ func init() { } // Bootstrapper returns a new bootstrapper for the cluster -func Bootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) { +func Bootstrapper(api libmachine.API, bootstrapperName string, machineName string) (bootstrapper.Bootstrapper, error) { var b bootstrapper.Bootstrapper var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api) + b, err 
= kubeadm.NewBootstrapper(api, machineName) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/cluster/iso.go b/pkg/minikube/cluster/iso.go index 253acbd370..15c06b4a37 100644 --- a/pkg/minikube/cluster/iso.go +++ b/pkg/minikube/cluster/iso.go @@ -22,7 +22,7 @@ import ( ) // CacheISO downloads and caches ISO. -func CacheISO(cfg config.MachineConfig) error { +func CacheISO(cfg config.ClusterConfig) error { if driver.BareMetal(cfg.Driver) { return nil } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 89e2852017..0f031716dc 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -141,19 +141,19 @@ func encode(w io.Writer, m MinikubeConfig) error { } // Load loads the kubernetes and machine config for the current machine -func Load(profile string) (*MachineConfig, error) { +func Load(profile string) (*ClusterConfig, error) { return DefaultLoader.LoadConfigFromFile(profile) } // Write writes the kubernetes and machine config for the current machine -func Write(profile string, cc *MachineConfig) error { +func Write(profile string, cc *ClusterConfig) error { return DefaultLoader.WriteConfigToFile(profile, cc) } // Loader loads the kubernetes and machine config based on the machine profile name type Loader interface { - LoadConfigFromFile(profile string, miniHome ...string) (*MachineConfig, error) - WriteConfigToFile(profileName string, cc *MachineConfig, miniHome ...string) error + LoadConfigFromFile(profile string, miniHome ...string) (*ClusterConfig, error) + WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error } type simpleConfigLoader struct{} @@ -161,8 +161,8 @@ type simpleConfigLoader struct{} // DefaultLoader is the default config loader var DefaultLoader Loader = &simpleConfigLoader{} -func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...string) (*MachineConfig, error) { - var cc 
MachineConfig +func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ...string) (*ClusterConfig, error) { + var cc ClusterConfig // Move to profile package path := profileFilePath(profileName, miniHome...) @@ -184,7 +184,7 @@ func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ... return &cc, nil } -func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *MachineConfig, miniHome ...string) error { +func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error { // Move to profile package path := profileFilePath(profileName, miniHome...) contents, err := json.MarshalIndent(cc, "", " ") diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go index 219acb5e46..1c6f050159 100644 --- a/pkg/minikube/config/node.go +++ b/pkg/minikube/config/node.go @@ -17,7 +17,7 @@ limitations under the License. package config // AddNode adds a new node config to an existing cluster. -func AddNode(cc *MachineConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { +func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { node := Node{ Name: name, Worker: true, diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index c37eabac4e..5ad8572e13 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -52,7 +52,7 @@ func (p *Profile) IsValid() bool { } // PrimaryControlPlane gets the node specific config for the first created control plane -func PrimaryControlPlane(cc MachineConfig) (Node, error) { +func PrimaryControlPlane(cc ClusterConfig) (Node, error) { for _, n := range cc.Nodes { if n.ControlPlane { return n, nil @@ -86,12 +86,12 @@ func ProfileExists(name string, miniHome ...string) bool { // CreateEmptyProfile creates an empty profile and stores in $MINIKUBE_HOME/profiles//config.json func CreateEmptyProfile(name string, 
miniHome ...string) error { - cfg := &MachineConfig{} + cfg := &ClusterConfig{} return SaveProfile(name, cfg, miniHome...) } // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json -func SaveProfile(name string, cfg *MachineConfig, miniHome ...string) error { +func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") if err != nil { return err diff --git a/pkg/minikube/config/profile_test.go b/pkg/minikube/config/profile_test.go index aecb4c2f81..06903d1808 100644 --- a/pkg/minikube/config/profile_test.go +++ b/pkg/minikube/config/profile_test.go @@ -164,13 +164,13 @@ func TestCreateProfile(t *testing.T) { var testCases = []struct { name string - cfg *MachineConfig + cfg *ClusterConfig expectErr bool }{ - {"p_empty_config", &MachineConfig{}, false}, - {"p_partial_config", &MachineConfig{KubernetesConfig: KubernetesConfig{ + {"p_empty_config", &ClusterConfig{}, false}, + {"p_partial_config", &ClusterConfig{KubernetesConfig: KubernetesConfig{ ShouldLoadCachedImages: false}}, false}, - {"p_partial_config2", &MachineConfig{ + {"p_partial_config2", &ClusterConfig{ KeepContext: false, KubernetesConfig: KubernetesConfig{ ShouldLoadCachedImages: false}}, false}, } diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json index 766e9c04c1..c4214bf442 100644 --- a/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json index 99e4b167a5..ab35410474 100644 --- 
a/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p2/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json b/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json +++ b/pkg/minikube/config/testdata/.minikube2/profiles/p5_partial_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json index b0e1b57105..6c826ebfc6 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p4_partial_profile_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": 
"https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json index c1cf21b26f..6680e4b784 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p5_missing_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json index 667cbd7652..2bab758640 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p6_empty_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json index 7cbd2e409f..d56f53688d 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p7_invalid_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git 
a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json index 855b31af90..26324fc366 100644 --- a/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-all/.minikube/profiles/p8_partial_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json index b0e1b57105..6c826ebfc6 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p1/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json index 29f62c0149..a99c56efe8 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p4_partial_profile_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json 
b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json index c1cf21b26f..6680e4b784 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p5_missing_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json index 667cbd7652..2bab758640 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p6_empty_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json index 7cbd2e409f..d56f53688d 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p7_invalid_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json 
b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json index 855b31af90..26324fc366 100644 --- a/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json +++ b/pkg/minikube/config/testdata/delete-single/.minikube/profiles/p8_partial_machine_config/config.json @@ -1,5 +1,5 @@ { - "MachineConfig": { + "ClusterConfig": { "KeepContext": false, "MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso", "Memory": 2000, diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 6513efd2d9..868ad8842e 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -27,11 +27,11 @@ import ( type Profile struct { Name string Status string // running, stopped - Config *MachineConfig + Config *ClusterConfig } -// MachineConfig contains the parameters used to start a cluster. -type MachineConfig struct { +// ClusterConfig contains the parameters used to start a cluster. 
+type ClusterConfig struct { Name string KeepContext bool // used by start and profile command to or not to switch kubectl's current context EmbedCerts bool // used by kubeconfig.Setup diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 29d61b244f..301c3b02fd 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -61,7 +61,7 @@ func CacheImagesForBootstrapper(imageRepository string, version string, clusterB } // LoadImages loads previously cached images into the container runtime -func LoadImages(cc *config.MachineConfig, runner command.Runner, images []string, cacheDir string) error { +func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error { glog.Infof("LoadImages start: %s", images) start := time.Now() diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index bf38656062..3b84c58842 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -41,7 +41,7 @@ type MockDownloader struct{} func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" } func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil } -func createMockDriverHost(c config.MachineConfig) (interface{}, error) { +func createMockDriverHost(c config.ClusterConfig) (interface{}, error) { return nil, nil } @@ -60,7 +60,7 @@ func RegisterMockDriver(t *testing.T) { } } -var defaultMachineConfig = config.MachineConfig{ +var defaultClusterConfig = config.ClusterConfig{ Driver: driver.Mock, MinikubeISO: constants.DefaultISOURL, Downloader: MockDownloader{}, @@ -76,7 +76,7 @@ func TestCreateHost(t *testing.T) { t.Fatal("Machine already exists.") } - _, err := createHost(api, defaultMachineConfig) + _, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -114,7 +114,7 @@ func TestStartHostExists(t *testing.T) { 
RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. - ih, err := createHost(api, defaultMachineConfig) + ih, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -128,7 +128,7 @@ func TestStartHostExists(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = ih.Name // This should pass without calling Create because the host exists already. h, err := StartHost(api, mc) @@ -151,7 +151,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) api.NotExistError = true - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -159,7 +159,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = h.Name // This should pass with creating host, while machine does not exist. @@ -193,7 +193,7 @@ func TestStartStoppedHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. 
- h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -203,7 +203,7 @@ func TestStartStoppedHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - mc := defaultMachineConfig + mc := defaultClusterConfig mc.Name = h.Name h, err = StartHost(api, mc) if err != nil { @@ -233,7 +233,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultMachineConfig) + h, err := StartHost(api, defaultClusterConfig) if err != nil { t.Fatal("Error starting host.") } @@ -261,7 +261,7 @@ func TestStartHostConfig(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - config := config.MachineConfig{ + config := config.ClusterConfig{ Driver: driver.Mock, DockerEnv: []string{"FOO=BAR"}, DockerOpt: []string{"param=value"}, @@ -298,7 +298,7 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -314,7 +314,7 @@ func TestStopHost(t *testing.T) { func TestDeleteHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -326,7 +326,7 @@ func TestDeleteHost(t *testing.T) { func TestDeleteHostErrorDeletingVM(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultMachineConfig) + h, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -343,7 +343,7 @@ func 
TestDeleteHostErrorDeletingFiles(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -357,7 +357,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) api.NotExistError = true - _, err := createHost(api, defaultMachineConfig) + _, err := createHost(api, defaultClusterConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -383,7 +383,7 @@ func TestGetHostStatus(t *testing.T) { checkState(state.None.String()) - if _, err := createHost(api, defaultMachineConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig); err != nil { t.Errorf("createHost failed: %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 2a69a37472..86ec7e2d70 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,7 +54,7 @@ var ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, mc config.MachineConfig) (*host.Host, error) { +func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) { out.T(out.Waiting, "Reconfiguring existing host ...") start := time.Now() diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 33687dc151..b043f58051 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -61,7 +61,7 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { // Prevent machine-driver boot races, as well as our own certificate race releaser, err := acquireMachinesLock(cfg.Name) if err != nil { @@ -85,7 +85,7 @@ func StartHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) return fixHost(api, cfg) } -func engineOptions(cfg config.MachineConfig) *engine.Options { +func engineOptions(cfg config.ClusterConfig) *engine.Options { o := engine.Options{ Env: cfg.DockerEnv, InsecureRegistry: append([]string{constants.DefaultServiceCIDR}, cfg.InsecureRegistry...), @@ -96,7 +96,7 @@ func engineOptions(cfg config.MachineConfig) *engine.Options { return &o } -func createHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) { +func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) start := time.Now() defer func() { @@ -152,7 +152,7 @@ func createHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error } // postStart are functions shared between startHost and fixHost -func postStartSetup(h *host.Host, mc config.MachineConfig) error { +func postStartSetup(h *host.Host, mc config.ClusterConfig) error { glog.Infof("post-start starting for %q (driver=%q)", h.Name, h.DriverName) start := time.Now() defer func() { @@ -225,7 +225,7 @@ func acquireMachinesLock(name string) (mutex.Releaser, error) { } // showHostInfo shows host information -func showHostInfo(cfg config.MachineConfig) { +func showHostInfo(cfg config.ClusterConfig) { if driver.BareMetal(cfg.Driver) { info, err := getHostInfo() if err == nil { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index ba4c7f5275..ce76ded6b4 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -81,7 +81,7 @@ func showVersionInfo(k8sVersion 
string, cr cruntime.Manager) { } // setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Node) bootstrapper.Bootstrapper { +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { exit.WithError("Failed to get bootstrapper", err) @@ -99,7 +99,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Nod return bs } -func setupKubeconfig(h *host.Host, c *config.MachineConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { +func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { addr, err := h.Driver.GetURL() if err != nil { exit.WithError("Failed to get driver URL", err) diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go index d66e61510e..d0c4021222 100644 --- a/pkg/minikube/node/machine.go +++ b/pkg/minikube/node/machine.go @@ -39,7 +39,7 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() if err != nil { exit.WithError("Failed to get machine client", err) @@ -68,7 +68,7 @@ func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command. 
} // startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) { +func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) { exists, err := api.Exists(mc.Name) if err != nil { exit.WithError("Failed to check if machine exists", err) diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 19b51d1770..4e2f75b94b 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -38,18 +38,19 @@ const ( ) // Add adds a new node config to an existing cluster. -func Add(cc *config.MachineConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { +func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { n := config.Node{ Name: name, Worker: true, } + // TODO: Deal with parameters better. Ideally we should be able to accept any node-specific minikube start params here. 
if controlPlane { n.ControlPlane = true } - if worker { - n.Worker = true + if !worker { + n.Worker = false } if k8sVersion != "" { @@ -69,7 +70,7 @@ func Add(cc *config.MachineConfig, name string, controlPlane bool, worker bool, } // Delete stops and deletes the given node from the given cluster -func Delete(cc config.MachineConfig, name string) error { +func Delete(cc config.ClusterConfig, name string) error { _, index, err := Retrieve(&cc, name) if err != nil { return err @@ -95,7 +96,7 @@ func Delete(cc config.MachineConfig, name string) error { } // Retrieve finds the node by name in the given cluster -func Retrieve(cc *config.MachineConfig, name string) (*config.Node, int, error) { +func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) { for i, n := range cc.Nodes { if n.Name == name { return &n, i, nil @@ -106,7 +107,7 @@ func Retrieve(cc *config.MachineConfig, name string) (*config.Node, int, error) } // Save saves a node to a cluster -func Save(cfg *config.MachineConfig, node *config.Node) error { +func Save(cfg *config.ClusterConfig, node *config.Node) error { update := false for i, n := range cfg.Nodes { if n.Name == node.Name { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 07d14d4525..1fe966e5ac 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -33,7 +33,7 @@ import ( ) // Start spins up a guest and starts the kubernetes node. -func Start(mc config.MachineConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { +func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 38cc95acb7..0b66dfdecb 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -43,7 +43,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return kic.NewDriver(kic.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 50a4e5a408..47a3db9091 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -57,7 +57,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { u := config.UUID if u == "" { u = uuid.NewUUID().String() diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 9f15d0c470..89f63c93f3 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -52,7 +52,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := hyperv.NewDriver(config.Name, localpath.MiniPath()) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.VSwitch = config.HypervVirtualSwitch diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index dedad73bfb..a3dbf67193 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -67,7 +67,7 @@ type kvmDriver struct { ConnectionURI string } -func configure(mc config.MachineConfig) 
(interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { name := mc.Name return kvmDriver{ BaseDriver: &drivers.BaseDriver{ diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index aa8523cab6..4e1ae1a794 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -42,7 +42,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return none.NewDriver(none.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index de319ec8fb..79d0e9085e 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -44,7 +44,7 @@ func init() { } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index a8e19dbbc0..ec5d6013ac 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -49,7 +49,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { return kic.NewDriver(kic.Config{ MachineName: mc.Name, StorePath: localpath.MiniPath(), diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index bfba0e42db..c3888c3758 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ 
-49,7 +49,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { d := virtualbox.NewDriver(mc.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 885063cde2..0333dce541 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -39,7 +39,7 @@ func init() { } } -func configure(mc config.MachineConfig) (interface{}, error) { +func configure(mc config.ClusterConfig) (interface{}, error) { d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index bb5ed4196b..524e50f88c 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -44,7 +44,7 @@ func init() { } } -func configure(config cfg.MachineConfig) (interface{}, error) { +func configure(config cfg.ClusterConfig) (interface{}, error) { d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go index 159c7a4568..e5fb98ce51 100644 --- a/pkg/minikube/registry/registry.go +++ b/pkg/minikube/registry/registry.go @@ -60,7 +60,7 @@ type Registry interface { } // Configurator emits a struct to be marshalled into JSON for Machine Driver -type Configurator func(config.MachineConfig) (interface{}, error) +type Configurator func(config.ClusterConfig) (interface{}, error) // Loader is a function that loads a byte stream and creates a 
driver. type Loader func() drivers.Driver diff --git a/pkg/minikube/tunnel/cluster_inspector.go b/pkg/minikube/tunnel/cluster_inspector.go index bb86db778a..8f9001e3db 100644 --- a/pkg/minikube/tunnel/cluster_inspector.go +++ b/pkg/minikube/tunnel/cluster_inspector.go @@ -64,7 +64,7 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) { if err != nil { return hostState, nil, err } - var c *config.MachineConfig + var c *config.ClusterConfig c, err = m.configLoader.LoadConfigFromFile(m.machineName) if err != nil { err = errors.Wrapf(err, "error loading config for %s", m.machineName) @@ -80,7 +80,7 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) { return hostState, route, nil } -func getRoute(host *host.Host, clusterConfig config.MachineConfig) (*Route, error) { +func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, error) { hostDriverIP, err := host.Driver.GetIP() if err != nil { return nil, errors.Wrapf(err, "error getting host IP for %s", host.Name) diff --git a/pkg/minikube/tunnel/cluster_inspector_test.go b/pkg/minikube/tunnel/cluster_inspector_test.go index c3be2e2483..834bd8241d 100644 --- a/pkg/minikube/tunnel/cluster_inspector_test.go +++ b/pkg/minikube/tunnel/cluster_inspector_test.go @@ -66,7 +66,7 @@ func TestMinikubeCheckReturnsHostInformation(t *testing.T) { } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "96.0.0.0/12", }, @@ -104,7 +104,7 @@ func TestMinikubeCheckReturnsHostInformation(t *testing.T) { } func TestUnparseableCIDR(t *testing.T) { - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "bad.cidr.0.0/12", }} @@ -124,7 +124,7 @@ func TestUnparseableCIDR(t *testing.T) { func TestRouteIPDetection(t *testing.T) { expectedTargetCIDR := "10.96.0.0/12" - cfg := config.MachineConfig{ + cfg := config.ClusterConfig{ 
KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: expectedTargetCIDR, }, diff --git a/pkg/minikube/tunnel/test_doubles.go b/pkg/minikube/tunnel/test_doubles.go index 5ac4874593..b8a8ae009f 100644 --- a/pkg/minikube/tunnel/test_doubles.go +++ b/pkg/minikube/tunnel/test_doubles.go @@ -82,14 +82,14 @@ func (r *fakeRouter) Inspect(route *Route) (exists bool, conflict string, overla } type stubConfigLoader struct { - c *config.MachineConfig + c *config.ClusterConfig e error } -func (l *stubConfigLoader) WriteConfigToFile(profileName string, cc *config.MachineConfig, miniHome ...string) error { +func (l *stubConfigLoader) WriteConfigToFile(profileName string, cc *config.ClusterConfig, miniHome ...string) error { return l.e } -func (l *stubConfigLoader) LoadConfigFromFile(profile string, miniHome ...string) (*config.MachineConfig, error) { +func (l *stubConfigLoader) LoadConfigFromFile(profile string, miniHome ...string) (*config.ClusterConfig, error) { return l.c, l.e } diff --git a/pkg/minikube/tunnel/tunnel_test.go b/pkg/minikube/tunnel/tunnel_test.go index 20048f36d0..c017fd7aac 100644 --- a/pkg/minikube/tunnel/tunnel_test.go +++ b/pkg/minikube/tunnel/tunnel_test.go @@ -423,7 +423,7 @@ func TestTunnel(t *testing.T) { }, } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: tc.serviceCIDR, }}, @@ -478,7 +478,7 @@ func TestErrorCreatingTunnel(t *testing.T) { } configLoader := &stubConfigLoader{ - c: &config.MachineConfig{ + c: &config.ClusterConfig{ KubernetesConfig: config.KubernetesConfig{ ServiceCIDR: "10.96.0.0/12", }}, diff --git a/site/content/en/docs/Contributing/drivers.en.md b/site/content/en/docs/Contributing/drivers.en.md index aecd0e1179..6c3f4a74a8 100644 --- a/site/content/en/docs/Contributing/drivers.en.md +++ b/site/content/en/docs/Contributing/drivers.en.md @@ -85,7 +85,7 @@ func init() { }) } -func createVMwareFusionHost(config cfg.MachineConfig) 
interface{} { +func createVMwareFusionHost(config cfg.ClusterConfig) interface{} { d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) d.Memory = config.Memory From ec191119dfebacf9ef385e745c4c1bde1982e542 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 21 Feb 2020 15:46:02 -0800 Subject: [PATCH 02/42] let's move some start code around --- cmd/minikube/cmd/delete.go | 24 +++++---- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 8 +-- pkg/minikube/bootstrapper/bootstrapper.go | 1 + pkg/minikube/cluster/cluster.go | 3 ++ pkg/minikube/node/config.go | 63 ++--------------------- pkg/minikube/node/machine.go | 3 +- pkg/minikube/node/start.go | 29 +++-------- 9 files changed, 36 insertions(+), 99 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index f58bd5440e..324d6d6403 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -191,14 +191,20 @@ func deleteProfile(profile *pkg_config.Profile) error { } if err == nil && driver.BareMetal(cc.Driver) { - if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - deletionError, ok := err.(DeletionError) - if ok { - delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) - deletionError.Err = delErr - return deletionError + var e error + for _, n := range cc.Nodes { + if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), n.Name); err != nil { + deletionError, ok := err.(DeletionError) + if ok { + delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) + deletionError.Err = delErr + e = deletionError + } + e = err } - return err + } + if e != nil { + return e } } @@ -272,9 +278,9 @@ func profileDeletionErr(profileName string, additionalInfo string) error { return 
fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo) } -func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string) error { +func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string, nodeName string) error { out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName}) - clusterBootstrapper, err := cluster.Bootstrapper(api, bsName) + clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName) if err != nil { return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal} } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 239c1df7ec..7d6b0841c6 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -54,7 +54,7 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error adding node to cluster", err) } - _, err = node.Start(*mc, *n, false, nil) + err = node.Start(*mc, *n, false, nil) if err != nil { exit.WithError("Error starting node", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index d62cdf7ef1..c08c8b1037 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - _, err = node.Start(*cc, *n, false, nil) + err = node.Start(*cc, *n, true, nil) if err != nil { out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 72d83bd549..3154c2370e 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -174,7 +174,7 @@ func initMinikubeFlags() { // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { startCmd.Flags().String(kubernetesVersion, "", "The 
kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&node.ExtraOptions, "extra-config", + startCmd.Flags().Var(&cluster.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -688,7 +688,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { validateCPUCount(driver.BareMetal(drvName)) // check that kubeadm extra args contain only whitelisted parameters - for param := range node.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range cluster.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -821,7 +821,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) NetworkPlugin: selectedNetworkPlugin, ServiceCIDR: viper.GetString(serviceCIDR), ImageRepository: repository, - ExtraOptions: node.ExtraOptions, + ExtraOptions: cluster.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), EnableDefaultCNI: selectedEnableDefaultCNI, }, @@ -855,7 +855,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = node.ExtraOptions.Set(eo) + err = cluster.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go 
b/pkg/minikube/bootstrapper/bootstrapper.go index eba5167179..d09bef5521 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -40,6 +40,7 @@ type Bootstrapper interface { DeleteCluster(config.KubernetesConfig) error WaitForCluster(config.ClusterConfig, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error + UpdateNode(config.ClusterConfig) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a2b9e06613..baafe689a7 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,9 +26,12 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" ) +var ExtraOptions config.ExtraOptionSlice + // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI // INFO lvl logging is displayed due to the kubernetes api calling flag.Set("logtostderr", "true") in its init() // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index ce76ded6b4..eb1a61f68c 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -22,31 +22,23 @@ import ( "os/exec" "path/filepath" "strconv" - "strings" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" "github.com/golang/glog" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" 
"k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) var ( - DockerEnv []string - DockerOpt []string - ExtraOptions config.ExtraOptionSlice - AddonList []string + DockerEnv []string + DockerOpt []string + AddonList []string ) // configureRuntimes does what needs to happen to get a runtime going. @@ -80,55 +72,6 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) { } } -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() - if err != nil { - exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) - } - - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) - 
} - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - // configureMounts configures any requested filesystem mounts func configureMounts() { if !viper.GetBool(createMount) { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go index d0c4021222..279e233c2a 100644 --- a/pkg/minikube/node/machine.go +++ b/pkg/minikube/node/machine.go @@ -39,7 +39,8 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { +// StartMachine starts a VM +func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() if err != nil { exit.WithError("Failed to get machine client", err) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1fe966e5ac..353c5ae4e9 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -22,18 +22,17 @@ import ( "github.com/spf13/viper" "golang.org/x/sync/errgroup" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util" ) // Start spins up a guest and starts the kubernetes node. 
-func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { +func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) @@ -44,33 +43,17 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons exit.WithError("Failed to save config", err) } + bs, err := cluster.Bootstrapper() + k8sVersion := mc.KubernetesConfig.KubernetesVersion driverName := mc.Driver // exits here in case of --download-only option. handleDownloadOnly(&cacheGroup, k8sVersion) - mRunner, preExists, machineAPI, host := startMachine(&mc, &n) - defer machineAPI.Close() // configure the runtime (docker, containerd, crio) cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig) showVersionInfo(k8sVersion, cr) waitCacheRequiredImages(&cacheGroup) - //TODO(sharifelgamal): Part out the cluster-wide operations, perhaps using the "primary" param - - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } - - // setup kubeadm (must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, mc, n) - - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - if err := bs.StartCluster(mc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) - } configureMounts() // enable addons, both old and new! 
@@ -80,7 +63,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons } addons.Start(viper.GetString(config.MachineProfile), ea, AddonList) - if err = CacheAndLoadImagesInConfig(); err != nil { + if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } @@ -96,7 +79,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons } } - return kubeconfig, nil + return nil } // prepareNone prepares the user and host for the joy of the "none" driver From 785338737d11f27ef5ad421714b0a1e37c42f2a2 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 21 Feb 2020 16:06:11 -0800 Subject: [PATCH 03/42] add new setup cluster file --- pkg/minikube/cluster/setup.go | 124 ++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 pkg/minikube/cluster/setup.go diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go new file mode 100644 index 0000000000..9ce35bcf9c --- /dev/null +++ b/pkg/minikube/cluster/setup.go @@ -0,0 +1,124 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "strconv" + "strings" + + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/spf13/viper" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/out" +) + +const ( + waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + embedCerts = "embed-certs" + keepContext = "keep-context" +) + +// InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster +func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (*kubeconfig.Settings, error) { + mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) + defer machineAPI.Close() + + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) + if err != nil { + exit.WithError("Failed to setup kubeconfig", err) + } + + // setup kubeadm (must come after setupKubeconfig) + bs := setupKubeAdm(machineAPI, cc, n) + + // pull images or restart cluster + out.T(out.Launch, "Launching Kubernetes ... 
") + if err := bs.StartCluster(cc); err != nil { + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + } + + // Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + } + + return kubeconfig, nil + +} + +// setupKubeAdm adds any requested files into the VM before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, download binaries + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + return bs +} + +func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := h.Driver.GetURL() + if err != nil { + exit.WithError("Failed to get driver URL", err) + } + if !driver.IsKIC(h.DriverName) { + addr = strings.Replace(addr, "tcp://", "https://", -1) + addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) + } + + if c.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.MakeMiniPath("client.crt"), + ClientKey: 
localpath.MakeMiniPath("client.key"), + CertificateAuthority: localpath.MakeMiniPath("ca.crt"), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), + } + + kcs.SetPath(kubeconfig.PathFromEnv()) + if err := kubeconfig.Update(kcs); err != nil { + return kcs, err + } + return kcs, nil +} From f22efd871aa7173f20ba41dc7e43735daa8dc051 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Sun, 23 Feb 2020 22:41:08 -0800 Subject: [PATCH 04/42] mostly moving code around and adding UpdateNode --- cmd/minikube/cmd/node_add.go | 7 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 39 ++-- pkg/minikube/bootstrapper/bootstrapper.go | 4 +- pkg/minikube/bootstrapper/bsutil/ops.go | 2 +- pkg/minikube/bootstrapper/certs.go | 6 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 34 +++- pkg/minikube/cluster/cluster.go | 3 - pkg/minikube/cluster/setup.go | 177 +++++++++++++++++- pkg/minikube/config/node.go | 11 ++ pkg/minikube/config/profile.go | 18 ++ pkg/minikube/machine/cluster_test.go | 27 ++- pkg/minikube/machine/fix.go | 6 +- pkg/minikube/machine/start.go | 12 +- pkg/minikube/node/config.go | 67 +------ pkg/minikube/node/machine.go | 185 ------------------- pkg/minikube/node/node.go | 10 +- pkg/minikube/node/start.go | 47 +++-- 18 files changed, 334 insertions(+), 323 deletions(-) delete mode 100644 pkg/minikube/node/machine.go diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 7d6b0841c6..cf3a1c626e 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -49,16 +49,11 @@ var nodeAddCmd = &cobra.Command{ } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - n, err := node.Add(mc, name, cp, worker, "", profile) + err = node.Add(mc, name, cp, worker, "", profile) if err != nil { exit.WithError("Error adding node to cluster", err) } - err = node.Start(*mc, *n, false, nil) - if err != nil { - exit.WithError("Error starting node", 
err) - } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile}) }, } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index c08c8b1037..9d17ab1b7f 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - err = node.Start(*cc, *n, true, nil) + err = node.Start(*cc, *n, nil) if err != nil { out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 1162429547..fbb30b7adb 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -118,6 +118,7 @@ const ( autoUpdate = "auto-update-drivers" hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" + nodes = "nodes" ) var ( @@ -160,7 +161,7 @@ func initMinikubeFlags() { startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).") startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.") startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") - startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") + startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). 
Used in conjunction with \"--network-plugin=cni\".") @@ -169,12 +170,13 @@ func initMinikubeFlags() { startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") + startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") } // initKubernetesFlags inits the commandline flags for kubernetes related options func initKubernetesFlags() { startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&cluster.ExtraOptions, "extra-config", + startCmd.Flags().Var(&config.ExtraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -226,8 +228,8 @@ func initNetworkingFlags() { startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers") startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.") startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.") - startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") - startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)") + startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") + startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)") } // startCmd represents the start command @@ -335,7 +337,14 @@ func runStart(cmd *cobra.Command, args []string) { existingAddons = existing.Addons } } - kubeconfig, err := node.Start(mc, n, true, existingAddons) + + // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
+ if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { + exit.WithError("Failed to save config", err) + } + + kubeconfig, err := cluster.InitialSetup(mc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) } @@ -343,6 +352,14 @@ func runStart(cmd *cobra.Command, args []string) { if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { glog.Errorf("kubectl info: %v", err) } + + numNodes := viper.GetInt(nodes) + if numNodes > 1 { + for i := 0; i < numNodes-1; i++ { + nodeName := fmt.Sprintf("%s%d", n.Name, i+1) + node.Add(&mc, nodeName, false, true, "", "") + } + } } func updateDriver(driverName string) { @@ -691,7 +708,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { validateCPUCount(driver.BareMetal(drvName)) // check that kubeadm extra args contain only whitelisted parameters - for param := range cluster.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -791,8 +808,8 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) HyperkitVSockPorts: viper.GetStringSlice(vsockPorts), NFSShare: viper.GetStringSlice(nfsShare), NFSSharesRoot: viper.GetString(nfsSharesRoot), - DockerEnv: node.DockerEnv, - DockerOpt: node.DockerOpt, + DockerEnv: config.DockerEnv, + DockerOpt: config.DockerOpt, InsecureRegistry: insecureRegistry, RegistryMirror: registryMirror, HostOnlyCIDR: viper.GetString(hostOnlyCIDR), @@ -824,7 +841,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) NetworkPlugin: selectedNetworkPlugin, ServiceCIDR: viper.GetString(serviceCIDR), 
ImageRepository: repository, - ExtraOptions: cluster.ExtraOptions, + ExtraOptions: config.ExtraOptions, ShouldLoadCachedImages: viper.GetBool(cacheImages), EnableDefaultCNI: selectedEnableDefaultCNI, }, @@ -846,7 +863,7 @@ func setDockerProxy() { continue } } - node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v)) + config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v)) } } } @@ -858,7 +875,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = cluster.ExtraOptions.Set(eo) + err = config.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index d09bef5521..6bb03fa986 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -23,6 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" ) // LogOptions are options to be passed to LogCommands @@ -40,7 +41,8 @@ type Bootstrapper interface { DeleteCluster(config.KubernetesConfig) error WaitForCluster(config.ClusterConfig, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error - UpdateNode(config.ClusterConfig) + UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error + GenerateToken(config.KubernetesConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/ops.go b/pkg/minikube/bootstrapper/bsutil/ops.go index bf855a9210..d364aa0748 100644 --- a/pkg/minikube/bootstrapper/bsutil/ops.go +++ b/pkg/minikube/bootstrapper/bsutil/ops.go @@ -47,7 +47,7 @@ func AdjustResourceLimits(c command.Runner) error { return nil } -// ExistingConfig checks if there are config files from possible previous kubernets cluster +// ExistingConfig checks if there are config files from possible previous kubernetes cluster func ExistingConfig(c command.Runner) error { args := append([]string{"ls"}, expectedRemoteArtifacts...) _, err := c.RunCmd(exec.Command("sudo", args...)) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index bb4bde2a38..7b634970a0 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -121,8 +121,10 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) return errors.Wrap(err, "encoding kubeconfig") } - kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") - copyableFiles = append(copyableFiles, kubeCfgFile) + if n.ControlPlane { + kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") + copyableFiles = append(copyableFiles, kubeCfgFile) + } for _, f := range copyableFiles { if err := cmd.Copy(f); err != nil { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5af39eac9b..663e538f8c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -381,6 +381,20 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC return nil } +// GenerateToken creates a token and returns the appropriate kubeadm join command to run +func (k *Bootstrapper) GenerateToken(k8s 
config.KubernetesConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(k8s.KubernetesVersion))) + r, err := k.c.RunCmd(tokenCmd) + if err != nil { + return "", errors.Wrap(err, "generating bootstrap token") + } + joinCmd := r.Stdout.String() + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(k8s.KubernetesVersion), 1) + joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + + return joinCmd, nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -405,7 +419,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er return bootstrapper.SetupCerts(k.c, k8s, n) } -// UpdateCluster updates the cluster +// UpdateCluster updates the cluster. func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { @@ -423,14 +437,24 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "runtime") } - // TODO: multiple nodes - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, cfg.Nodes[0]) + for _, n := range cfg.Nodes { + err := k.UpdateNode(cfg, n, r) + if err != nil { + return errors.Wrap(err, "updating node") + } + } + + return nil +} + +// UpdateNode updates a node. 
+func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, n) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } - // TODO: multiple nodes - kubeletCfg, err := bsutil.NewKubeletConfig(cfg, cfg.Nodes[0], r) + kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubelet config") } diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index baafe689a7..a2b9e06613 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,12 +26,9 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" ) -var ExtraOptions config.ExtraOptionSlice - // This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI // INFO lvl logging is displayed due to the kubernetes api calling flag.Set("logtostderr", "true") in its init() // see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 9ce35bcf9c..d786312820 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -17,23 +17,33 @@ limitations under the License. 
package cluster import ( + "fmt" + "net" + "os" + "os/exec" "strconv" "strings" + "time" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/util/retry" ) const ( @@ -41,11 +51,13 @@ const ( waitUntilHealthy = "wait" embedCerts = "embed-certs" keepContext = "keep-context" + imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster -func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) (*kubeconfig.Settings, error) { - mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) +func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { + _, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() // Must be written before bootstrap, otherwise health checks may flake due to stale IP @@ -59,8 +71,17 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, cr cruntime.Manager) ( // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... 
") - if err := bs.StartCluster(cc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + err = bs.StartCluster(cc) + if err != nil { + /*config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: mRunner, ImageRepository: cc.KubernetesConfig.ImageRepository, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion} + cr, err := cruntime.New(config) + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))*/ + exit.WithError("Error starting cluster", err) + } + + // enable addons, both old and new! + if existingAddons != nil { + addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) } // Skip pre-existing, because we already waited for health @@ -80,7 +101,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) if err != nil { exit.WithError("Failed to get bootstrapper", err) } - for _, eo := range ExtraOptions { + for _, eo := range config.ExtraOptions { out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) } // Loads cached images, generates config files, download binaries @@ -122,3 +143,145 @@ func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clus } return kcs, nil } + +// StartMachine starts a VM +func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { + m, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Failed to get machine client", err) + } + host, preExists = startHost(m, *cfg, *node) + runner, err = machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + ip := validateNetwork(host, runner) + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + if err != nil { + out.ErrT(out.FailureType, 
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + node.IP = ip + config.SaveNodeToProfile(cfg, node) + + return runner, preExists, m, host +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { + exists, err := api.Exists(n.Name) + if err != nil { + exit.WithError("Failed to check if machine exists", err) + } + + host, err := machine.StartHost(api, mc, n) + if err != nil { + exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) + } + return host, exists +} + +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner) string { + ip, err := h.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get VM IP address", err) + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + trySSH(h, ip) + } + + tryLookup(r) + tryRegistry(r) + return ip +} + +func trySSH(h *host.Host, ip string) { + if viper.GetBool("force") { + return + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { + exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } +} + +func tryLookup(r command.Runner) { + // DNS check + if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { + glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) + // will try with without query type for ISOs with different busybox versions. 
+ if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { + glog.Warningf("nslookup failed: %v", err) + out.WarningT("Node may be unable to resolve external DNS records") + } + } +} +func tryRegistry(r command.Runner) { + // Try an HTTPS connection to the image repository + proxy := os.Getenv("HTTPS_PROXY") + opts := []string{"-sS"} + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) + } + + repo := viper.GetString(imageRepository) + if repo == "" { + repo = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", repo)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) + } +} diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go index 1c6f050159..572a182553 100644 --- a/pkg/minikube/config/node.go +++ b/pkg/minikube/config/node.go @@ -16,6 +16,17 @@ limitations under the License. package config +var ( + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string +) + // AddNode adds a new node config to an existing cluster. 
func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { node := Node{ diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 5ad8572e13..0acfe1a8b4 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/golang/glog" + "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/util/lock" @@ -90,6 +91,23 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } +// SaveNodeToProfile saves a node to a cluster +func SaveNodeToProfile(cfg *ClusterConfig, node *Node) error { + update := false + for i, n := range cfg.Nodes { + if n.Name == node.Name { + cfg.Nodes[i] = *node + update = true + break + } + } + + if !update { + cfg.Nodes = append(cfg.Nodes, *node) + } + return SaveProfile(viper.GetString(MachineProfile), cfg) +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 3b84c58842..ad326611c0 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -130,8 +130,10 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name + + n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. 
- h, err := StartHost(api, mc) + h, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -162,8 +164,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name + n := config.Node{Name: h.Name} + // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc) + h, err = StartHost(api, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -172,8 +176,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc.Name = h.Name + n.Name = h.Name + // Second call. This should pass without calling Create because the host exists already. - h, err = StartHost(api, mc) + h, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -205,7 +211,10 @@ func TestStartStoppedHost(t *testing.T) { provision.SetDetector(md) mc := defaultClusterConfig mc.Name = h.Name - h, err = StartHost(api, mc) + + n := config.Node{Name: h.Name} + + h, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -233,7 +242,9 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig) + n := config.Node{Name: viper.GetString("profile")} + + h, err := StartHost(api, defaultClusterConfig, n) if err != nil { t.Fatal("Error starting host.") } @@ -261,14 +272,16 @@ func TestStartHostConfig(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - config := config.ClusterConfig{ + cfg := config.ClusterConfig{ Driver: driver.Mock, DockerEnv: []string{"FOO=BAR"}, DockerOpt: []string{"param=value"}, Downloader: MockDownloader{}, } - h, err := StartHost(api, config) + n := config.Node{Name: viper.GetString("profile")} + + h, err := StartHost(api, cfg, n) if err != nil { t.Fatal("Error starting host.") } diff --git 
a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index a041e6a84f..8ea159a124 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,16 +54,16 @@ var ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) { +func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, error) { out.T(out.Waiting, "Reconfiguring existing host ...") start := time.Now() - glog.Infof("fixHost starting: %s", mc.Name) + glog.Infof("fixHost starting: %s", n.Name) defer func() { glog.Infof("fixHost completed within %s", time.Since(start)) }() - h, err := api.Load(mc.Name) + h, err := api.Load(n.Name) if err != nil { return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index c5cd2fa11f..4ae16dc005 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -61,9 +61,9 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(cfg.Name) + releaser, err := acquireMachinesLock(n.Name) if err != nil { return nil, errors.Wrap(err, "boot lock") } @@ -73,16 +73,16 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) releaser.Release() }() - exists, err := api.Exists(cfg.Name) + exists, err := api.Exists(n.Name) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", cfg.Name) + return nil, errors.Wrapf(err, "exists: %s", n.Name) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", cfg) + glog.Infof("Provisioning new machine with config: %+v", n) return createHost(api, cfg) } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg) + return fixHost(api, cfg, n) } func engineOptions(cfg config.ClusterConfig) *engine.Options { diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index 57ab7bf8fd..3448b29ce0 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -35,17 +35,6 @@ import ( "k8s.io/minikube/pkg/util/lock" ) -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - ExtraOptions config.ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - // configureRuntimes does what needs to happen to get a runtime going. 
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager { config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion} @@ -69,66 +58,14 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) - for _, v := range DockerOpt { + for _, v := range config.DockerOpt { out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v}) } - for _, v := range DockerEnv { + for _, v := range config.DockerEnv { out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v}) } } -<<<<<<< HEAD -======= -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() - if err != nil { - 
exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) - } - - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) - } - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - ->>>>>>> c4e2236e2b2966cb05fa11b3bdc8cf1d060a270c // configureMounts configures any requested filesystem mounts func configureMounts() { if !viper.GetBool(createMount) { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 279e233c2a..0000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -// StartMachine starts a VM -func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc) - if err != nil { - exit.WithError("Unable to start VM. 
Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. 
Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --vm-driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. - if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - out.WarningT("Node may be unable to resolve external DNS records") - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) 
- } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 4e2f75b94b..ac247b0ce3 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -25,7 +25,6 @@ import ( ) const ( - imageRepository = "image-repository" cacheImages = "cache-images" waitUntilHealthy = "wait" cacheImageConfigKey = "cache" @@ -38,7 +37,7 @@ const ( ) // Add adds a new node config to an existing cluster. -func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { +func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) error { n := config.Node{ Name: name, Worker: true, @@ -62,11 +61,12 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, cc.Nodes = append(cc.Nodes, n) err := config.SaveProfile(profileName, cc) if err != nil { - return nil, err + return err } - _, err = Start(*cc, n, false, nil) - return &n, err + err = Start(*cc, n, nil) + + return err } // Delete stops and deletes the given node from the given cluster diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 36f1155f2f..d43d9f455d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/viper" "golang.org/x/sync/errgroup" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" @@ -32,25 +33,25 @@ import ( ) // Start 
spins up a guest and starts the kubernetes node. -func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddons map[string]bool) error { +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. var cacheGroup errgroup.Group - beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) - // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { - exit.WithError("Failed to save config", err) + runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) + defer mAPI.Close() + + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) } - bs, err := cluster.Bootstrapper() - - k8sVersion := mc.KubernetesConfig.KubernetesVersion - driverName := mc.Driver + k8sVersion := cc.KubernetesConfig.KubernetesVersion + driverName := cc.Driver // exits here in case of --download-only option. handleDownloadOnly(&cacheGroup, k8sVersion) // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig) + cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) waitCacheRequiredImages(&cacheGroup) @@ -58,7 +59,11 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon // enable addons, both old and new! 
if existingAddons != nil { - addons.Start(viper.GetString(config.MachineProfile), existingAddons, AddonList) + addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) + } + + if err := bs.UpdateNode(cc, n, cr); err != nil { + exit.WithError("Failed to update node", err) } if err := CacheAndLoadImagesInConfig(); err != nil { @@ -66,18 +71,30 @@ func Start(mc config.ClusterConfig, n config.Node, preExists bool, existingAddon } // special ops for none , like change minikube directory. - if driverName == driver.None { + // multinode super doesn't work on the none driver + if driverName == driver.None && len(cc.Nodes) == 1 { prepareNone() } // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } - return nil + bs.SetupCerts(cc.KubernetesConfig, n) + + cp, err := config.PrimaryControlPlane(cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name) + if err != nil { + exit.WithError("Getting bootstrapper", err) + } + joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + return bs.JoinCluster(cc, n, joinCmd) } // prepareNone prepares the user and host for the joy of the "none" driver From 9a3ecab61a67ef6fd179125c47d38dcb84573b0d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 15:09:04 -0800 Subject: [PATCH 05/42] fixed more stuff --- cmd/minikube/cmd/node.go | 7 ++- cmd/minikube/cmd/node_add.go | 4 ++ cmd/minikube/cmd/node_delete.go | 2 +- pkg/drivers/hyperkit/driver.go | 4 +- pkg/minikube/config/config.go | 8 ++++ pkg/minikube/config/node.go | 47 ------------------- pkg/minikube/machine/cache_images.go | 44 ++++++++--------- 
pkg/minikube/machine/cluster_test.go | 43 ++++++++++------- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/machine/start.go | 12 ++--- pkg/minikube/node/node.go | 21 +-------- pkg/minikube/node/start.go | 5 ++ pkg/minikube/registry/drvs/docker/docker.go | 6 +-- .../registry/drvs/hyperkit/hyperkit.go | 27 ++++++----- .../registry/drvs/parallels/parallels.go | 14 +++--- pkg/minikube/registry/drvs/podman/podman.go | 6 +-- .../registry/drvs/virtualbox/virtualbox.go | 4 +- pkg/minikube/registry/drvs/vmware/vmware.go | 4 +- .../drvs/vmwarefusion/vmwarefusion.go | 14 +++--- pkg/minikube/registry/registry.go | 2 +- 20 files changed, 120 insertions(+), 156 deletions(-) delete mode 100644 pkg/minikube/config/node.go diff --git a/cmd/minikube/cmd/node.go b/cmd/minikube/cmd/node.go index 7b70780f74..39dbac6c7c 100644 --- a/cmd/minikube/cmd/node.go +++ b/cmd/minikube/cmd/node.go @@ -23,10 +23,9 @@ import ( // nodeCmd represents the set of node subcommands var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Node operations", - Long: "Operations on nodes", - Hidden: true, // This won't be fully functional and thus should not be documented yet + Use: "node", + Short: "Node operations", + Long: "Operations on nodes", Run: func(cmd *cobra.Command, args []string) { exit.UsageT("Usage: minikube node [add|start|stop|delete]") }, diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index cf3a1c626e..fe5557258d 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -47,6 +47,10 @@ var nodeAddCmd = &cobra.Command{ if nodeName == "" { name = profile + strconv.Itoa(len(mc.Nodes)+1) } + _, _, err = node.Retrieve(mc, name) + if err == nil { + exit.WithCodeT(100, "{{.nodeName}} already exists in cluster {{.cluster}}. 
Choose a different name.", out.V{"nodeName": name, "cluster": mc.Name}) + } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) err = node.Add(mc, name, cp, worker, "", profile) diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index 92e5e5755d..33d6ca6660 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -46,7 +46,7 @@ var nodeDeleteCmd = &cobra.Command{ err = node.Delete(*cc, name) if err != nil { - out.FatalT("Failed to delete node {{.name}}", out.V{"name": name}) + exit.WithError("deleting node", err) } out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) diff --git a/pkg/drivers/hyperkit/driver.go b/pkg/drivers/hyperkit/driver.go index bf775240e6..9659237408 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -65,6 +65,7 @@ type Driver struct { UUID string VpnKitSock string VSockPorts []string + ClusterName string } // NewDriver creates a new driver for a host @@ -199,7 +200,7 @@ func (d *Driver) Restart() error { } func (d *Driver) createHost() (*hyperkit.HyperKit, error) { - stateDir := filepath.Join(d.StorePath, "machines", d.MachineName) + stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName) h, err := hyperkit.New("", d.VpnKitSock, stateDir) if err != nil { return nil, errors.Wrap(err, "new-ing Hyperkit") @@ -519,6 +520,7 @@ func (d *Driver) sendSignal(s os.Signal) error { func (d *Driver) getPid() int { pidPath := d.ResolveStorePath(machineFileName) + log.Debugf("PIDPATH=%s", pidPath) f, err := os.Open(pidPath) if err != nil { log.Warnf("Error reading pid file: %v", err) diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 351d6a177d..bf7e0bdd28 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -52,6 +52,14 @@ const ( var ( // ErrKeyNotFound is the error returned when a key doesn't exist 
in the config file ErrKeyNotFound = errors.New("specified key could not be found in config") + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string ) // ErrNotExist is the error returned when a config does not exist diff --git a/pkg/minikube/config/node.go b/pkg/minikube/config/node.go deleted file mode 100644 index 572a182553..0000000000 --- a/pkg/minikube/config/node.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - ExtraOptions ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - -// AddNode adds a new node config to an existing cluster. 
-func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { - node := Node{ - Name: name, - Worker: true, - } - - if controlPlane { - node.ControlPlane = true - } - - if k8sVersion != "" { - node.KubernetesVersion = k8sVersion - } - - cc.Nodes = append(cc.Nodes, node) - return SaveProfile(profileName, cc) -} diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 301c3b02fd..40e35a2c69 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -137,29 +137,31 @@ func CacheAndLoadImages(images []string) error { return errors.Wrap(err, "list profiles") } for _, p := range profiles { // loading images to all running profiles - pName := p.Name // capture the loop variable - status, err := GetHostStatus(api, pName) - if err != nil { - glog.Warningf("skipping loading cache for profile %s", pName) - glog.Errorf("error getting status for %s: %v", pName, err) - continue // try next machine - } - if status == state.Running.String() { // the not running hosts will load on next start - h, err := api.Load(pName) + for _, n := range p.Config.Nodes { + pName := n.Name // capture the loop variable + status, err := GetHostStatus(api, pName) if err != nil { - return err + glog.Warningf("skipping loading cache for profile %s", pName) + glog.Errorf("error getting status for %s: %v", pName, err) + continue // try next machine } - cr, err := CommandRunner(h) - if err != nil { - return err - } - c, err := config.Load(pName) - if err != nil { - return err - } - err = LoadImages(c, cr, images, constants.ImageCacheDir) - if err != nil { - glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. 
%v", pName, err) + if status == state.Running.String() { // the not running hosts will load on next start + h, err := api.Load(pName) + if err != nil { + return err + } + cr, err := CommandRunner(h) + if err != nil { + return err + } + c, err := config.Load(pName) + if err != nil { + return err + } + err = LoadImages(c, cr, images, constants.ImageCacheDir) + if err != nil { + glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err) + } } } } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index ad326611c0..2053320a75 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -41,7 +41,7 @@ type MockDownloader struct{} func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" } func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil } -func createMockDriverHost(c config.ClusterConfig) (interface{}, error) { +func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, error) { return nil, nil } @@ -67,28 +67,35 @@ var defaultClusterConfig = config.ClusterConfig{ DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"}, } +var defaultNodeConfig = config.Node{ + Name: viper.GetString("profile"), +} + func TestCreateHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - exists, _ := api.Exists(viper.GetString("profile")) + profile := viper.GetString("profile") + exists, _ := api.Exists(profile) if exists { t.Fatal("Machine already exists.") } - _, err := createHost(api, defaultClusterConfig) + n := config.Node{Name: profile} + + _, err := createHost(api, defaultClusterConfig, n) if err != nil { t.Fatalf("Error creating host: %v", err) } - exists, err = api.Exists(viper.GetString("profile")) + exists, err = api.Exists(profile) if err != nil { - t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err) + t.Fatalf("exists failed for %q: %v", profile, err) } if 
!exists { - t.Fatalf("%q does not exist, but should.", viper.GetString("profile")) + t.Fatalf("%q does not exist, but should.", profile) } - h, err := api.Load(viper.GetString("profile")) + h, err := api.Load(profile) if err != nil { t.Fatalf("Error loading machine: %v", err) } @@ -113,8 +120,9 @@ func TestCreateHost(t *testing.T) { func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + // Create an initial host. - ih, err := createHost(api, defaultClusterConfig) + ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -131,9 +139,8 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name - n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, n) + h, err := StartHost(api, mc, defaultNodeConfig) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -153,7 +160,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) api.NotExistError = true - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -199,7 +206,7 @@ func TestStartStoppedHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. 
- h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -311,7 +318,7 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -327,7 +334,7 @@ func TestStopHost(t *testing.T) { func TestDeleteHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -339,7 +346,7 @@ func TestDeleteHost(t *testing.T) { func TestDeleteHostErrorDeletingVM(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig) + h, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -356,7 +363,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } @@ -370,7 +377,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. 
User Interrupt Cancel) api.NotExistError = true - _, err := createHost(api, defaultClusterConfig) + _, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -396,7 +403,7 @@ func TestGetHostStatus(t *testing.T) { checkState(state.None.String()) - if _, err := createHost(api, defaultClusterConfig); err != nil { + if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil { t.Errorf("createHost failed: %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 8ea159a124..3b465e5025 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -88,7 +88,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host. } // recreate virtual machine out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": mc.Name}) - h, err = createHost(api, mc) + h, err = createHost(api, mc, n) if err != nil { return nil, errors.Wrap(err, "Error recreating VM") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 4ae16dc005..1d34f28070 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -69,7 +69,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start)) releaser.Release() }() @@ -78,8 +78,8 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho return nil, errors.Wrapf(err, "exists: %s", n.Name) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", n) - return createHost(api, cfg) + glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) + return createHost(api, cfg, n) } glog.Infoln("Skipping create...Using existing machine 
configuration") return fixHost(api, cfg, n) @@ -96,8 +96,8 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options { return &o } -func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) { - glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) +func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { + glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) start := time.Now() defer func() { glog.Infof("createHost completed in %s", time.Since(start)) @@ -114,7 +114,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error if def.Empty() { return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver) } - dd, err := def.Config(cfg) + dd, err := def.Config(cfg, n) if err != nil { return nil, errors.Wrap(err, "config") } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index ac247b0ce3..af9856d077 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -19,6 +19,7 @@ package node import ( "errors" + "github.com/golang/glog" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/machine" @@ -76,10 +77,9 @@ func Delete(cc config.ClusterConfig, name string) error { return err } - /*err = Stop(cc, nd) if err != nil { glog.Warningf("Failed to stop node %s. 
Will still try to delete.", name) - }*/ + } api, err := machine.NewAPIClient() if err != nil { @@ -105,20 +105,3 @@ func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) return nil, -1, errors.New("Could not find node " + name) } - -// Save saves a node to a cluster -func Save(cfg *config.ClusterConfig, node *config.Node) error { - update := false - for i, n := range cfg.Nodes { - if n.Name == node.Name { - cfg.Nodes[i] = *node - update = true - break - } - } - - if !update { - cfg.Nodes = append(cfg.Nodes, *node) - } - return config.SaveProfile(viper.GetString(config.MachineProfile), cfg) -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index d43d9f455d..22dd8ac868 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -38,6 +38,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo var cacheGroup errgroup.Group beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + // Why do we need this? 
+ if cc.Downloader == nil { + cc.Downloader = util.DefaultDownloader{} + } + runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 0b66dfdecb..d5278a8f92 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -43,15 +43,15 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, Memory: mc.Memory, OCIBinary: oci.Docker, - APIServerPort: mc.Nodes[0].Port, + APIServerPort: n.Port, }), nil } diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 47a3db9091..b9e4b4f09f 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -31,7 +31,7 @@ import ( "github.com/pborman/uuid" "k8s.io/minikube/pkg/drivers/hyperkit" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -57,28 +57,29 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - u := config.UUID +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + u := cfg.UUID if u == "" { u = uuid.NewUUID().String() } return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: config.Name, + MachineName: cfg.Name, StorePath: localpath.MiniPath(), SSHUser: "docker", }, - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - DiskSize: config.DiskSize, - Memory: config.Memory, - CPU: config.CPUs, - NFSShares: config.NFSShare, - 
NFSSharesRoot: config.NFSSharesRoot, + ClusterName: cfg.Name, + Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO), + DiskSize: cfg.DiskSize, + Memory: cfg.Memory, + CPU: cfg.CPUs, + NFSShares: cfg.NFSShare, + NFSSharesRoot: cfg.NFSSharesRoot, UUID: u, - VpnKitSock: config.HyperkitVpnKitSock, - VSockPorts: config.HyperkitVSockPorts, - Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + config.Name, + VpnKitSock: cfg.HyperkitVpnKitSock, + VSockPorts: cfg.HyperkitVSockPorts, + Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + n.Name, }, nil } diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index 79d0e9085e..29095a6226 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -24,7 +24,7 @@ import ( parallels "github.com/Parallels/docker-machine-parallels" "github.com/docker/machine/libmachine/drivers" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -44,12 +44,12 @@ func init() { } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.Memory = 
cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize return d, nil } diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index ec5d6013ac..eab3200b83 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -49,15 +49,15 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. CPU: mc.CPUs, Memory: mc.Memory, OCIBinary: oci.Podman, - APIServerPort: mc.Nodes[0].Port, + APIServerPort: n.Port, }), nil } diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index c3888c3758..7dd13af948 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -49,8 +49,8 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { - d := virtualbox.NewDriver(mc.Name, localpath.MiniPath()) +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { + d := virtualbox.NewDriver(n.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 0333dce541..b6a90929c9 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -39,8 +39,8 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { - d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath()) +func configure(mc config.ClusterConfig, n config.Node) (interface{}, 
error) { + d := vmwcfg.NewConfig(n.Name, localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index 524e50f88c..adc50d70e8 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -26,7 +26,7 @@ import ( "github.com/docker/machine/libmachine/drivers" "github.com/pkg/errors" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -44,12 +44,12 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.Memory = cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize // TODO(philips): push these defaults upstream to fixup this driver d.SSHPort = 22 diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go index e5fb98ce51..794dffc18a 100644 --- a/pkg/minikube/registry/registry.go +++ b/pkg/minikube/registry/registry.go @@ -60,7 +60,7 @@ type Registry interface { } // Configurator emits a struct to be marshalled into JSON for Machine Driver -type Configurator func(config.ClusterConfig) (interface{}, error) +type Configurator func(config.ClusterConfig, config.Node) (interface{}, error) // Loader is a function that loads a byte stream 
and creates a driver. type Loader func() drivers.Driver From be9c5f476149bc8904924c24043568180b0db70c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 15:58:35 -0800 Subject: [PATCH 06/42] pass in the node object into add --- cmd/minikube/cmd/node_add.go | 10 +++++++++- cmd/minikube/cmd/start.go | 8 +++++++- pkg/minikube/node/node.go | 24 ++---------------------- 3 files changed, 18 insertions(+), 24 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index fe5557258d..8cb836edea 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -53,7 +53,15 @@ var nodeAddCmd = &cobra.Command{ } out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - err = node.Add(mc, name, cp, worker, "", profile) + // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. + n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + } + + err = node.Add(mc, n) if err != nil { exit.WithError("Error adding node to cluster", err) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index fbb30b7adb..0aa5a8691a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -357,7 +357,13 @@ func runStart(cmd *cobra.Command, args []string) { if numNodes > 1 { for i := 0; i < numNodes-1; i++ { nodeName := fmt.Sprintf("%s%d", n.Name, i+1) - node.Add(&mc, nodeName, false, true, "", "") + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + } + node.Add(&mc, n) } } } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index af9856d077..86df75e021 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -38,29 +38,9 @@ const ( ) // Add adds a new node config to an existing cluster. 
-func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) error { - n := config.Node{ - Name: name, - Worker: true, - } - - // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. - if controlPlane { - n.ControlPlane = true - } - - if !worker { - n.Worker = false - } - - if k8sVersion != "" { - n.KubernetesVersion = k8sVersion - } else { - n.KubernetesVersion = cc.KubernetesConfig.KubernetesVersion - } - +func Add(cc *config.ClusterConfig, n config.Node) error { cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(profileName, cc) + err := config.SaveProfile(cc.Name, cc) if err != nil { return err } From f0ca34b0b549f267e60622cdb505a00e265ef615 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:17:29 -0800 Subject: [PATCH 07/42] fix unit tests --- pkg/minikube/machine/cluster_test.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 2053320a75..449acd729e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -75,27 +75,26 @@ func TestCreateHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - profile := viper.GetString("profile") - exists, _ := api.Exists(profile) + exists, _ := api.Exists(viper.GetString("profile")) if exists { t.Fatal("Machine already exists.") } - n := config.Node{Name: profile} + n := config.Node{Name: viper.GetString("profile")} _, err := createHost(api, defaultClusterConfig, n) if err != nil { t.Fatalf("Error creating host: %v", err) } - exists, err = api.Exists(profile) + exists, err = api.Exists(viper.GetString("profile")) if err != nil { - t.Fatalf("exists failed for %q: %v", profile, err) + t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err) } if !exists { - t.Fatalf("%q does not exist, but 
should.", profile) + t.Fatalf("%q does not exist, but should.", viper.GetString("profile")) } - h, err := api.Load(profile) + h, err := api.Load(viper.GetString("profile")) if err != nil { t.Fatalf("Error loading machine: %v", err) } @@ -121,6 +120,8 @@ func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + n := defaultNodeConfig + // Create an initial host. ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig) if err != nil { @@ -139,8 +140,10 @@ func TestStartHostExists(t *testing.T) { mc := defaultClusterConfig mc.Name = ih.Name + n.Name = ih.Name + // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, defaultNodeConfig) + h, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -294,13 +297,13 @@ func TestStartHostConfig(t *testing.T) { } for i := range h.HostOptions.EngineOptions.Env { - if h.HostOptions.EngineOptions.Env[i] != config.DockerEnv[i] { + if h.HostOptions.EngineOptions.Env[i] != cfg.DockerEnv[i] { t.Fatal("Docker env variables were not set!") } } for i := range h.HostOptions.EngineOptions.ArbitraryFlags { - if h.HostOptions.EngineOptions.ArbitraryFlags[i] != config.DockerOpt[i] { + if h.HostOptions.EngineOptions.ArbitraryFlags[i] != cfg.DockerOpt[i] { t.Fatal("Docker flags were not set!") } } From b2ba874d560b1b2b613ffe7d8d837c7890918630 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:27:05 -0800 Subject: [PATCH 08/42] SaveNode is simpler yeah --- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/config/profile.go | 4 ++-- pkg/minikube/node/node.go | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index d786312820..dc1f46a0cd 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -165,7 +165,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. 
} node.IP = ip - config.SaveNodeToProfile(cfg, node) + config.SaveNode(cfg, node) return runner, preExists, m, host } diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 0acfe1a8b4..78dee9a57e 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -91,8 +91,8 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } -// SaveNodeToProfile saves a node to a cluster -func SaveNodeToProfile(cfg *ClusterConfig, node *Node) error { +// SaveNode saves a node to a cluster +func SaveNode(cfg *ClusterConfig, node *Node) error { update := false for i, n := range cfg.Nodes { if n.Name == node.Name { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 86df75e021..b7c3cab1fd 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -39,14 +39,13 @@ const ( // Add adds a new node config to an existing cluster. func Add(cc *config.ClusterConfig, n config.Node) error { - cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(cc.Name, cc) + + err := config.SaveNode(cc, &n) if err != nil { return err } err = Start(*cc, n, nil) - return err } From f5bdba6088c272901bba9e6113492d7a006f9eb6 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:47:07 -0800 Subject: [PATCH 09/42] fix kvm2 configurator --- pkg/minikube/registry/drvs/kvm2/kvm2.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index a3dbf67193..5dd00c12c3 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -67,8 +67,8 @@ type kvmDriver struct { ConnectionURI string } -func configure(mc config.ClusterConfig) (interface{}, error) { - name := mc.Name +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { + name := n.Name return kvmDriver{ BaseDriver: &drivers.BaseDriver{ 
MachineName: name, From feaa9fc3b3549cc7ec358998a614da45e382a632 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 16:50:30 -0800 Subject: [PATCH 10/42] hyperv and none drivers needed fixing too --- pkg/minikube/registry/drvs/hyperv/hyperv.go | 22 ++++++++++----------- pkg/minikube/registry/drvs/none/none.go | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 89f63c93f3..841d38d540 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -29,7 +29,7 @@ import ( "github.com/docker/machine/libmachine/drivers" "github.com/pkg/errors" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/registry" @@ -52,16 +52,16 @@ func init() { } } -func configure(config cfg.ClusterConfig) (interface{}, error) { - d := hyperv.NewDriver(config.Name, localpath.MiniPath()) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.VSwitch = config.HypervVirtualSwitch - if d.VSwitch == "" && config.HypervUseExternalSwitch { - switchName, adapter, err := chooseSwitch(config.HypervExternalAdapter) +func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { + d := hyperv.NewDriver(n.Name, localpath.MiniPath()) + d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) + d.VSwitch = cfg.HypervVirtualSwitch + if d.VSwitch == "" && cfg.HypervUseExternalSwitch { + switchName, adapter, err := chooseSwitch(cfg.HypervExternalAdapter) if err != nil { return nil, errors.Wrapf(err, "failed to choose switch for Hyper-V driver") } - if config.HypervExternalAdapter == "" && switchName == "" { + if cfg.HypervExternalAdapter == "" && switchName == "" { // create a switch on the returned adapter switchName = defaultExternalSwitchName err 
:= createVMSwitch(switchName, adapter) @@ -71,9 +71,9 @@ func configure(config cfg.ClusterConfig) (interface{}, error) { } d.VSwitch = switchName } - d.MemSize = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize + d.MemSize = cfg.Memory + d.CPU = cfg.CPUs + d.DiskSize = cfg.DiskSize d.SSHUser = "docker" d.DisableDynamicMemory = true // default to disable dynamic memory as minikube is unlikely to work properly with dynamic memory return d, nil diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index 4e1ae1a794..4bf39d6d7b 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -42,9 +42,9 @@ func init() { } } -func configure(mc config.ClusterConfig) (interface{}, error) { +func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return none.NewDriver(none.Config{ - MachineName: mc.Name, + MachineName: n.Name, StorePath: localpath.MiniPath(), ContainerRuntime: mc.KubernetesConfig.ContainerRuntime, }), nil From 3d7215295384af1b6a72fa8aa04d2b757eadc2f6 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 17:28:10 -0800 Subject: [PATCH 11/42] fixing lint and other random incorrect stuff --- cmd/minikube/cmd/delete.go | 3 ++- cmd/minikube/cmd/start.go | 10 ++++++++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/minikube/cluster/setup.go | 6 ++++-- pkg/minikube/node/node.go | 2 -- pkg/minikube/node/start.go | 8 +++++++- 6 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 9f43331c70..cc74e5b89e 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -229,8 +229,9 @@ func deleteProfile(profile *pkg_config.Profile) error { delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) deletionError.Err = delErr e = deletionError + } else { + e = err } - e = err } } if e != nil { diff --git a/cmd/minikube/cmd/start.go 
b/cmd/minikube/cmd/start.go index 0aa5a8691a..194c1c988a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -355,7 +355,10 @@ func runStart(cmd *cobra.Command, args []string) { numNodes := viper.GetInt(nodes) if numNodes > 1 { - for i := 0; i < numNodes-1; i++ { + if driver.IsKIC(driverName) { + out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") + } + for i := 1; i < numNodes; i++ { nodeName := fmt.Sprintf("%s%d", n.Name, i+1) n := config.Node{ Name: nodeName, @@ -363,7 +366,10 @@ func runStart(cmd *cobra.Command, args []string) { ControlPlane: false, KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, } - node.Add(&mc, n) + err := node.Add(&mc, n) + if err != nil { + exit.WithError("adding node", err) + } } } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 663e538f8c..6d1110c5de 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -371,7 +371,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC fmt.Println(joinCmd) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { - return errors.Wrapf(err, "cmd failed: %s\n%s\n", joinCmd, out) + return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) } if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index dc1f46a0cd..ab5bd09d45 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -52,7 +52,6 @@ const ( embedCerts = "embed-certs" keepContext = "keep-context" imageRepository = "image-repository" - containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster @@ -165,7 
+164,10 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. } node.IP = ip - config.SaveNode(cfg, node) + err = config.SaveNode(cfg, node) + if err != nil { + exit.WithError("saving node", err) + } return runner, preExists, m, host } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index b7c3cab1fd..1f2147cbc5 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -30,8 +30,6 @@ const ( waitUntilHealthy = "wait" cacheImageConfigKey = "cache" containerRuntime = "container-runtime" - embedCerts = "embed-certs" - keepContext = "keep-context" mountString = "mount-string" createMount = "mount" waitTimeout = "wait-timeout" diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 22dd8ac868..a57a48a150 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -88,7 +88,10 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } } - bs.SetupCerts(cc.KubernetesConfig, n) + err = bs.SetupCerts(cc.KubernetesConfig, n) + if err != nil { + exit.WithError("setting up certs", err) + } cp, err := config.PrimaryControlPlane(cc) if err != nil { @@ -99,6 +102,9 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Getting bootstrapper", err) } joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + if err != nil { + exit.WithError("generating join token", err) + } return bs.JoinCluster(cc, n, joinCmd) } From 39f03bc925a0d7057c4bc2d1a8d93cc92a3fb32d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 24 Feb 2020 17:39:41 -0800 Subject: [PATCH 12/42] prepareNone was in the wrong spot --- pkg/minikube/cluster/setup.go | 36 ++++++++++++++++++++++++++++++++ pkg/minikube/node/start.go | 39 ----------------------------------- 2 files changed, 36 insertions(+), 39 deletions(-) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index ab5bd09d45..d8b5e3ed48 100644 --- 
a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -43,6 +43,7 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" + "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/retry" ) @@ -83,6 +84,12 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) } + // special ops for none , like change minikube directory. + // multinode super doesn't work on the none driver + if cc.Driver == driver.None && len(cc.Nodes) == 1 { + prepareNone() + } + // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { @@ -287,3 +294,32 @@ func tryRegistry(r command.Runner) { out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) } } + +// prepareNone prepares the user and host for the joy of the "none" driver +func prepareNone() { + out.T(out.StartingNone, "Configuring local host environment ...") + if viper.GetBool(config.WantNoneDriverWarning) { + out.T(out.Empty, "") + out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") + out.WarningT("For more information, see:") + out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.T(out.Empty, "") + } + + if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { + home := os.Getenv("HOME") + out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) + out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") + + out.T(out.Empty, "") + out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.T(out.Empty, "") + + out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + } + + if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { + exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) + } +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a57a48a150..b7ebb5f816 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,17 +17,13 @@ limitations under the License. package node import ( - "os" - "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util" ) @@ -75,12 +71,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo out.T(out.FailureType, "Unable to load cached images from config file.") } - // special ops for none , like change minikube directory. 
- // multinode super doesn't work on the none driver - if driverName == driver.None && len(cc.Nodes) == 1 { - prepareNone() - } - // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { @@ -107,32 +97,3 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } return bs.JoinCluster(cc, n, joinCmd) } - -// prepareNone prepares the user and host for the joy of the "none" driver -func prepareNone() { - out.T(out.StartingNone, "Configuring local host environment ...") - if viper.GetBool(config.WantNoneDriverWarning) { - out.T(out.Empty, "") - out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") - out.WarningT("For more information, see:") - out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.T(out.Empty, "") - } - - if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { - home := os.Getenv("HOME") - out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) - out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - - out.T(out.Empty, "") - out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.T(out.Empty, "") - - out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") - } - - if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { - exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) - } -} From 0fadf91d2cbd79da48549e35fcc256f5045a49f3 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 16:36:53 -0800 Subject: [PATCH 13/42] i think it works? --- cmd/minikube/cmd/delete.go | 39 ++++----- cmd/minikube/cmd/logs.go | 2 +- cmd/minikube/cmd/status.go | 87 ++++++++++++------- pkg/drivers/hyperkit/driver.go | 3 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 +- pkg/minikube/cluster/cluster.go | 4 +- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/node/start.go | 4 +- pkg/minikube/registry/drvs/docker/docker.go | 2 +- .../registry/drvs/hyperkit/hyperkit.go | 3 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- .../registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- .../registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../drvs/vmwarefusion/vmwarefusion.go | 2 +- pkg/provision/provision.go | 3 +- 18 files changed, 94 insertions(+), 72 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index cc74e5b89e..8ed30668b4 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -221,21 +221,14 @@ func deleteProfile(profile *pkg_config.Profile) error { } if err == nil && driver.BareMetal(cc.Driver) { - 
var e error - for _, n := range cc.Nodes { - if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), n.Name); err != nil { - deletionError, ok := err.(DeletionError) - if ok { - delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) - deletionError.Err = delErr - e = deletionError - } else { - e = err - } + if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), cc.Nodes[0].Name); err != nil { + deletionError, ok := err.(DeletionError) + if ok { + delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err)) + deletionError.Err = delErr + return deletionError } - } - if e != nil { - return e + return err } } @@ -243,13 +236,15 @@ func deleteProfile(profile *pkg_config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - if err = machine.DeleteHost(api, profile.Name); err != nil { - switch errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) - default: - out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) - out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + for _, n := range cc.Nodes { + if err = machine.DeleteHost(api, n.Name); err != nil { + switch errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + glog.Infof("%s cluster does not exist. 
Proceeding ahead with cleanup.", profile.Name) + default: + out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) + out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + } } } @@ -311,7 +306,7 @@ func profileDeletionErr(profileName string, additionalInfo string) error { func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string, nodeName string) error { out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName}) - clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName) + clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, profile, nodeName) if err != nil { return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal} } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 934ca9c1b0..7b6ff5f757 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -67,7 +67,7 @@ var logsCmd = &cobra.Command{ if err != nil { exit.WithError("command runner", err) } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile)) + bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile), viper.GetString(config.MachineProfile)) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 0fb1c79284..dfd62da6eb 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -55,10 +55,13 @@ const ( // Nonexistent means nonexistent Nonexistent = "Nonexistent" // ~state.None + // Irrelevant is used for statuses that aren't meaningful for worker nodes + Irrelevant = "Irrelevant" ) // Status holds string representations of 
component states type Status struct { + Name string Host string Kubelet string APIServer string @@ -69,7 +72,8 @@ const ( minikubeNotRunningStatusFlag = 1 << 0 clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 - defaultStatusFormat = `host: {{.Host}} + defaultStatusFormat = `{{.Name}} +host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} @@ -95,26 +99,35 @@ var statusCmd = &cobra.Command{ } defer api.Close() - machineName := viper.GetString(config.MachineProfile) - st, err := status(api, machineName) + cluster := viper.GetString(config.MachineProfile) + cc, err := config.Load(cluster) if err != nil { - glog.Errorf("status error: %v", err) - } - if st.Host == Nonexistent { - glog.Errorf("The %q cluster does not exist!", machineName) + exit.WithError("getting config", err) } - switch strings.ToLower(output) { - case "text": - if err := statusText(st, os.Stdout); err != nil { - exit.WithError("status text failure", err) + var st *Status + for _, n := range cc.Nodes { + machineName := fmt.Sprintf("%s-%s", cluster, n.Name) + st, err = status(api, machineName, n.ControlPlane) + if err != nil { + glog.Errorf("status error: %v", err) } - case "json": - if err := statusJSON(st, os.Stdout); err != nil { - exit.WithError("status json failure", err) + if st.Host == Nonexistent { + glog.Errorf("The %q host does not exist!", machineName) + } + + switch strings.ToLower(output) { + case "text": + if err := statusText(st, os.Stdout); err != nil { + exit.WithError("status text failure", err) + } + case "json": + if err := statusJSON(st, os.Stdout); err != nil { + exit.WithError("status json failure", err) + } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) } os.Exit(exitCode(st)) @@ -126,17 +139,22 @@ func exitCode(st *Status) int { if st.Host != state.Running.String() { c |= minikubeNotRunningStatusFlag } - if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() { + if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { c |= clusterNotRunningStatusFlag } - if st.Kubeconfig != Configured { + if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { c |= k8sNotRunningStatusFlag } return c } -func status(api libmachine.API, name string) (*Status, error) { +func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { + + profile := strings.Split(name, "-")[0] + node := strings.Split(name, "-")[1] + st := &Status{ + Name: node, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, @@ -179,10 +197,17 @@ func status(api libmachine.API, name string) (*Status, error) { } st.Kubeconfig = Misconfigured - ok, err := kubeconfig.IsClusterInConfig(ip, name) - glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) - if ok { - st.Kubeconfig = Configured + if !controlPlane { + st.Kubeconfig = Irrelevant + st.APIServer = Irrelevant + } + + if st.Kubeconfig != Irrelevant { + ok, err := kubeconfig.IsClusterInConfig(ip, profile) + glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) + if ok { + st.Kubeconfig = Configured + } } host, err := machine.CheckIfHostExistsAndLoad(api, name) @@ -205,14 +230,16 @@ func status(api libmachine.API, name string) (*Status, error) { st.Kubelet = stk.String() } - sta, err := kverify.APIServerStatus(cr, ip, port) - glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) + if st.APIServer != Irrelevant { + sta, err := kverify.APIServerStatus(cr, ip, port) + glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) - if err != nil { - glog.Errorln("Error apiserver status:", err) - 
st.APIServer = state.Error.String() - } else { - st.APIServer = sta.String() + if err != nil { + glog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } else { + st.APIServer = sta.String() + } } return st, nil diff --git a/pkg/drivers/hyperkit/driver.go b/pkg/drivers/hyperkit/driver.go index 9659237408..d3d1031629 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -65,7 +65,6 @@ type Driver struct { UUID string VpnKitSock string VSockPorts []string - ClusterName string } // NewDriver creates a new driver for a host @@ -200,7 +199,7 @@ func (d *Driver) Restart() error { } func (d *Driver) createHost() (*hyperkit.HyperKit, error) { - stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName) + stateDir := filepath.Join(d.StorePath, "machines", d.MachineName) h, err := hyperkit.New("", d.VpnKitSock, stateDir) if err != nil { return nil, errors.Wrap(err, "new-ing Hyperkit") diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 6d1110c5de..5ae6c09033 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -36,6 +36,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" @@ -73,7 +74,7 @@ func NewBootstrapper(api libmachine.API, name string) (*Bootstrapper, error) { if err != nil { return nil, errors.Wrap(err, "command runner") } - return &Bootstrapper{c: runner, contextName: name, k8sClient: nil}, nil + return &Bootstrapper{c: runner, contextName: viper.GetString(config.MachineProfile), k8sClient: nil}, nil } // GetKubeletStatus returns the kubelet status diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a2b9e06613..a38a4cf8b6 100644 --- 
a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -42,12 +42,12 @@ func init() { } // Bootstrapper returns a new bootstrapper for the cluster -func Bootstrapper(api libmachine.API, bootstrapperName string, machineName string) (bootstrapper.Bootstrapper, error) { +func Bootstrapper(api libmachine.API, bootstrapperName string, cluster string, nodeName string) (bootstrapper.Bootstrapper, error) { var b bootstrapper.Bootstrapper var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api, machineName) + b, err = kubeadm.NewBootstrapper(api, fmt.Sprintf("%s-%s", cluster, nodeName)) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index d8b5e3ed48..b784cb01e9 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -103,7 +103,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg.Name, n.Name) if err != nil { exit.WithError("Failed to get bootstrapper", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b7ebb5f816..6bf1aef74a 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -42,7 +42,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name) + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, n.Name) if err != nil { 
exit.WithError("Failed to get bootstrapper", err) } @@ -87,7 +87,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("Getting primary control plane", err) } - cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name) + cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, cp.Name) if err != nil { exit.WithError("Getting bootstrapper", err) } diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index d5278a8f92..4f3acbad62 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -45,7 +45,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: n.Name, + MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index b9e4b4f09f..0f0609176e 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -65,11 +65,10 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.Name, + MachineName: fmt.Sprintf("%s-%s", cfg.Name, n.Name), StorePath: localpath.MiniPath(), SSHUser: "docker", }, - ClusterName: cfg.Name, Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO), DiskSize: cfg.DiskSize, Memory: cfg.Memory, diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index 841d38d540..fccb1b9076 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -53,7 +53,7 @@ func init() { } func configure(cfg config.ClusterConfig, n 
config.Node) (interface{}, error) { - d := hyperv.NewDriver(n.Name, localpath.MiniPath()) + d := hyperv.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.VSwitch = cfg.HypervVirtualSwitch if d.VSwitch == "" && cfg.HypervUseExternalSwitch { diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index 5dd00c12c3..eb1e2d1773 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -68,7 +68,7 @@ type kvmDriver struct { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - name := n.Name + name := fmt.Sprintf("%s-%s", mc.Name, n.Name) return kvmDriver{ BaseDriver: &drivers.BaseDriver{ MachineName: name, diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index 29095a6226..a2ae347949 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver) + d := parallels.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index eab3200b83..e05c6671f2 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -51,7 +51,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: n.Name, + MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: 
strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index 7dd13af948..fcbbcc9440 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -50,7 +50,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := virtualbox.NewDriver(n.Name, localpath.MiniPath()) + d := virtualbox.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index b6a90929c9..33b3e7c586 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -40,7 +40,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwcfg.NewConfig(n.Name, localpath.MiniPath()) + d := vmwcfg.NewConfig(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index adc50d70e8..47047ffe3f 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver) + d := vmwarefusion.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = 
cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 52fb131960..ff5f08fef8 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -195,7 +195,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - c, err := config.Load(name) + cluster := strings.Split(name, "-")[0] + c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") } From e3826a5e4756714b69b305b4a0f4101fcf5b5b94 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 17:18:25 -0800 Subject: [PATCH 14/42] fix ip command --- cmd/minikube/cmd/ip.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 5d00182dde..e4ef171a1a 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -17,6 +17,8 @@ limitations under the License. 
package cmd import ( + "fmt" + "github.com/docker/machine/libmachine/mcnerror" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -43,11 +45,12 @@ var ipCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - host, err := api.Load(cc.Name) + machineName := fmt.Sprintf("%s-%s", cc.Name, cc.Nodes[0].Name) + host, err := api.Load(machineName) if err != nil { switch err := errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name}) + exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": machineName}) default: exit.WithError("Error getting host", err) } From 676588f6b6f811e6941314d8cc40ef535743ff9b Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 25 Feb 2020 23:47:08 -0800 Subject: [PATCH 15/42] refactor machine name creation into a function --- cmd/minikube/cmd/ip.go | 5 ++--- cmd/minikube/cmd/ssh.go | 13 +++++++++++-- cmd/minikube/cmd/status.go | 3 ++- cmd/minikube/cmd/status_test.go | 12 ++++++------ pkg/minikube/cluster/cluster.go | 3 ++- pkg/minikube/driver/driver.go | 5 +++++ pkg/minikube/machine/cluster_test.go | 11 +++++++---- pkg/minikube/machine/machine.go | 5 +---- pkg/minikube/registry/drvs/docker/docker.go | 2 +- pkg/minikube/registry/drvs/hyperkit/hyperkit.go | 2 +- pkg/minikube/registry/drvs/hyperv/hyperv.go | 2 +- pkg/minikube/registry/drvs/kvm2/kvm2.go | 2 +- pkg/minikube/registry/drvs/parallels/parallels.go | 2 +- pkg/minikube/registry/drvs/podman/podman.go | 2 +- pkg/minikube/registry/drvs/virtualbox/virtualbox.go | 2 +- pkg/minikube/registry/drvs/vmware/vmware.go | 2 +- .../registry/drvs/vmwarefusion/vmwarefusion.go | 2 +- 17 files changed, 45 insertions(+), 30 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index e4ef171a1a..a6b607eb12 100644 --- a/cmd/minikube/cmd/ip.go +++ 
b/cmd/minikube/cmd/ip.go @@ -17,13 +17,12 @@ limitations under the License. package cmd import ( - "fmt" - "github.com/docker/machine/libmachine/mcnerror" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" @@ -45,7 +44,7 @@ var ipCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - machineName := fmt.Sprintf("%s-%s", cc.Name, cc.Nodes[0].Name) + machineName := driver.MachineName(cc.Name, cc.Nodes[0].Name) host, err := api.Load(machineName) if err != nil { switch err := errors.Cause(err).(type) { diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 84dead06e1..2ae2e5febd 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -45,7 +45,15 @@ var sshCmd = &cobra.Command{ if err != nil { exit.WithError("Error getting config", err) } - host, err := machine.CheckIfHostExistsAndLoad(api, cc.Name) + + if nodeName == "" { + cp, err := config.PrimaryControlPlane(*cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + nodeName = cp.Name + } + host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(cc.Name, nodeName)) if err != nil { exit.WithError("Error getting host", err) } @@ -58,7 +66,7 @@ var sshCmd = &cobra.Command{ ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, args) + err = machine.CreateSSHShell(api, driver.MachineName(cc.Name, nodeName), args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) @@ -70,4 +78,5 @@ var sshCmd = &cobra.Command{ func init() { sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. 
Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. Defaults to the primary control plane.") } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index dfd62da6eb..b7e3005c23 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" @@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{ var st *Status for _, n := range cc.Nodes { - machineName := fmt.Sprintf("%s-%s", cluster, n.Name) + machineName := driver.MachineName(cluster, n.Name) st, err = status(api, machineName, n.ControlPlane) if err != nil { glog.Errorf("status error: %v", err) diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index ef414631f8..44f4133dfd 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", }, { name: "paused", - state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, + want: 
"minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", }, { name: "down", - state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index a38a4cf8b6..fd45d789aa 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" ) @@ -47,7 +48,7 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cluster string, n var err error switch bootstrapperName { case bootstrapper.Kubeadm: - b, err = kubeadm.NewBootstrapper(api, fmt.Sprintf("%s-%s", cluster, nodeName)) + b, err = kubeadm.NewBootstrapper(api, driver.MachineName(cluster, nodeName)) if err != nil { return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper") } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index eb122f3f2f..21ebc7c432 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -108,6 +108,11 @@ func BareMetal(name string) bool { return name == None || name == Mock } +// MachineName return the name of the machine given proper config +func MachineName(cluster string, node string) string { 
+ return fmt.Sprintf("%s-%s", cluster, node) +} + // NeedsRoot returns true if driver needs to run with root privileges func NeedsRoot(name string) bool { return name == None || name == Podman diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 449acd729e..2b26215c6c 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -427,19 +427,22 @@ func TestCreateSSHShell(t *testing.T) { t.Fatalf("Error starting ssh server: %v", err) } + m := viper.GetString("profile") + d := &tests.MockDriver{ Port: port, CurrentState: state.Running, BaseDriver: drivers.BaseDriver{ - IPAddress: "127.0.0.1", - SSHKeyPath: "", + IPAddress: "127.0.0.1", + SSHKeyPath: "", + MachineName: m, }, T: t, } - api.Hosts[viper.GetString("profile")] = &host.Host{Driver: d} + api.Hosts[m] = &host.Host{Driver: d} cliArgs := []string{"exit"} - if err := CreateSSHShell(api, cliArgs); err != nil { + if err := CreateSSHShell(api, m, cliArgs); err != nil { t.Fatalf("Error running ssh command: %v", err) } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 7295b33dab..5b65fd59d1 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -25,8 +25,6 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -124,8 +122,7 @@ func machineDirs(miniHome ...string) (dirs []string, err error) { } // CreateSSHShell creates a new SSH shell / client -func CreateSSHShell(api libmachine.API, args []string) error { - machineName := viper.GetString(config.MachineProfile) +func CreateSSHShell(api libmachine.API, machineName string, args []string) error { host, err := CheckIfHostExistsAndLoad(api, machineName) if err != nil { return errors.Wrap(err, "host exists and load") diff --git a/pkg/minikube/registry/drvs/docker/docker.go 
b/pkg/minikube/registry/drvs/docker/docker.go index 4f3acbad62..af8a8f27cd 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -45,7 +45,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), + MachineName: driver.MachineName(mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: kic.BaseImage, CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go index 0f0609176e..5fe099b79f 100644 --- a/pkg/minikube/registry/drvs/hyperkit/hyperkit.go +++ b/pkg/minikube/registry/drvs/hyperkit/hyperkit.go @@ -65,7 +65,7 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { return &hyperkit.Driver{ BaseDriver: &drivers.BaseDriver{ - MachineName: fmt.Sprintf("%s-%s", cfg.Name, n.Name), + MachineName: driver.MachineName(cfg.Name, n.Name), StorePath: localpath.MiniPath(), SSHUser: "docker", }, diff --git a/pkg/minikube/registry/drvs/hyperv/hyperv.go b/pkg/minikube/registry/drvs/hyperv/hyperv.go index fccb1b9076..55440fb711 100644 --- a/pkg/minikube/registry/drvs/hyperv/hyperv.go +++ b/pkg/minikube/registry/drvs/hyperv/hyperv.go @@ -53,7 +53,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := hyperv.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := hyperv.NewDriver(driver.MachineName(cfg.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.VSwitch = cfg.HypervVirtualSwitch if d.VSwitch == "" && cfg.HypervUseExternalSwitch { diff --git a/pkg/minikube/registry/drvs/kvm2/kvm2.go b/pkg/minikube/registry/drvs/kvm2/kvm2.go index eb1e2d1773..90071e6bcf 100644 --- a/pkg/minikube/registry/drvs/kvm2/kvm2.go +++ b/pkg/minikube/registry/drvs/kvm2/kvm2.go @@ -68,7 +68,7 @@ type
kvmDriver struct { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - name := fmt.Sprintf("%s-%s", mc.Name, n.Name) + name := driver.MachineName(mc.Name, n.Name) return kvmDriver{ BaseDriver: &drivers.BaseDriver{ MachineName: name, diff --git a/pkg/minikube/registry/drvs/parallels/parallels.go b/pkg/minikube/registry/drvs/parallels/parallels.go index a2ae347949..8f892cb0b0 100644 --- a/pkg/minikube/registry/drvs/parallels/parallels.go +++ b/pkg/minikube/registry/drvs/parallels/parallels.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := parallels.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) + d := parallels.NewDriver(driver.MachineName(cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = cfg.Memory d.CPU = cfg.CPUs diff --git a/pkg/minikube/registry/drvs/podman/podman.go b/pkg/minikube/registry/drvs/podman/podman.go index e05c6671f2..f2c9a0329f 100644 --- a/pkg/minikube/registry/drvs/podman/podman.go +++ b/pkg/minikube/registry/drvs/podman/podman.go @@ -51,7 +51,7 @@ func init() { func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { return kic.NewDriver(kic.Config{ - MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name), + MachineName: driver.MachineName(mc.Name, n.Name), StorePath: localpath.MiniPath(), ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest. 
CPU: mc.CPUs, diff --git a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go index fcbbcc9440..04c084c249 100644 --- a/pkg/minikube/registry/drvs/virtualbox/virtualbox.go +++ b/pkg/minikube/registry/drvs/virtualbox/virtualbox.go @@ -50,7 +50,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := virtualbox.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := virtualbox.NewDriver(driver.MachineName(mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmware/vmware.go b/pkg/minikube/registry/drvs/vmware/vmware.go index 33b3e7c586..65dceda11b 100644 --- a/pkg/minikube/registry/drvs/vmware/vmware.go +++ b/pkg/minikube/registry/drvs/vmware/vmware.go @@ -40,7 +40,7 @@ func init() { } func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwcfg.NewConfig(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath()) + d := vmwcfg.NewConfig(driver.MachineName(mc.Name, n.Name), localpath.MiniPath()) d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO) d.Memory = mc.Memory d.CPU = mc.CPUs diff --git a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go index 47047ffe3f..5f73cb6949 100644 --- a/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go +++ b/pkg/minikube/registry/drvs/vmwarefusion/vmwarefusion.go @@ -45,7 +45,7 @@ func init() { } func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { - d := vmwarefusion.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) + d := vmwarefusion.NewDriver(driver.MachineName(cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver) d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO) d.Memory = 
cfg.Memory d.CPU = cfg.CPUs From d7df027fb8a56b6daa93bf2d898e12ca9dc97043 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:26:05 -0800 Subject: [PATCH 16/42] fix delete and stop --- cmd/minikube/cmd/delete.go | 7 ++- cmd/minikube/cmd/stop.go | 44 +++++++------------ pkg/drivers/hyperkit/driver.go | 1 - pkg/minikube/bootstrapper/bootstrapper.go | 2 +- .../bootstrapper/bsutil/kverify/kverify.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 4 +- pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/node/start.go | 2 +- 8 files changed, 25 insertions(+), 39 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 8ed30668b4..9169a4fa71 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -142,10 +142,9 @@ func runDelete(cmd *cobra.Command, args []string) { exit.UsageT("usage: minikube delete") } - profileName := viper.GetString(pkg_config.MachineProfile) - profile, err := pkg_config.LoadProfile(profileName) + profile, err := pkg_config.LoadProfile(profileFlag) if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName}) + out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileFlag}) } errs := DeleteProfiles([]*pkg_config.Profile{profile}) @@ -237,7 +236,7 @@ func deleteProfile(profile *pkg_config.Profile) error { } for _, n := range cc.Nodes { - if err = machine.DeleteHost(api, n.Name); err != nil { + if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { switch errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 0c5ae89e71..a1bb3e782c 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -17,19 +17,15 @@ limitations under the License. 
package cmd import ( - "time" - - "github.com/docker/machine/libmachine/mcnerror" - "github.com/golang/glog" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" pkg_config "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util/retry" ) // stopCmd represents the stop command @@ -50,31 +46,23 @@ func runStop(cmd *cobra.Command, args []string) { } defer api.Close() - nonexistent := false - stop := func() (err error) { - err = machine.StopHost(api, profile) - if err == nil { - return nil - } - glog.Warningf("stop host returned error: %v", err) - - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": profile}) - nonexistent = true - return nil - default: - return err + cc, err := config.Load(profile) + if err != nil { + exit.WithError("Error retrieving config", err) + } + + // TODO replace this back with expo backoff + for _, n := range cc.Nodes { + err := machine.StopHost(api, driver.MachineName(profile, n.Name)) + if err != nil { + exit.WithError("Unable to stop VM", err) } + /*if err := retry.Expo(fn, 5*time.Second, 3*time.Minute, 5); err != nil { + exit.WithError("Unable to stop VM", err) + }*/ } - if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { - exit.WithError("Unable to stop VM", err) - } - - if !nonexistent { - out.T(out.Stopped, `"{{.profile_name}}" stopped.`, out.V{"profile_name": profile}) - } + out.T(out.Stopped, `"{{.profile_name}}" stopped.`, out.V{"profile_name": profile}) if err := killMountProcess(); err != nil { out.T(out.WarningType, "Unable to kill mount process: {{.error}}", out.V{"error": err}) diff --git a/pkg/drivers/hyperkit/driver.go 
b/pkg/drivers/hyperkit/driver.go index d3d1031629..bf775240e6 100644 --- a/pkg/drivers/hyperkit/driver.go +++ b/pkg/drivers/hyperkit/driver.go @@ -519,7 +519,6 @@ func (d *Driver) sendSignal(s os.Signal) error { func (d *Driver) getPid() int { pidPath := d.ResolveStorePath(machineFileName) - log.Debugf("PIDPATH=%s", pidPath) f, err := os.Open(pidPath) if err != nil { log.Warnf("Error reading pid file: %v", err) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 6bb03fa986..f250423833 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -39,7 +39,7 @@ type Bootstrapper interface { StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.ClusterConfig, time.Duration) error + WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error GenerateToken(config.KubernetesConfig) (string, error) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index aa076cecb9..bc4f03e8b0 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -67,7 +67,7 @@ func apiServerPID(cr command.Runner) (int, error) { return strconv.Atoi(s) } -// SystemPods verifies essential pods for running kurnetes is running +// SystemPods verifies essential pods for running kubernetes is running func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { glog.Info("waiting for kube-system pods to appear ...") pStart := time.Now() diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5ae6c09033..7ccef6ee40 100644 --- 
a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -249,8 +249,8 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { +// WaitForCluster blocks until the node appears to be healthy +func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() out.T(out.Waiting, "Waiting for cluster to come online ...") cp, err := config.PrimaryControlPlane(cfg) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index b784cb01e9..f09a8188c3 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -92,7 +92,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 6bf1aef74a..efea8548fb 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -73,7 +73,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo // Skip pre-existing, because we already waited for health if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(cc, viper.GetDuration(waitTimeout)); err != nil { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { exit.WithError("Wait failed", err) } } From e7f8abc048a314a4cb254007c043aaae49095dd9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:40:28 -0800 Subject: [PATCH 17/42] fix 
waitfornode --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 7ccef6ee40..1316612771 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -249,16 +249,19 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the node appears to be healthy +// WaitForNode blocks until the node appears to be healthy func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() - out.T(out.Waiting, "Waiting for cluster to come online ...") + out.T(out.Waiting, "Waiting for node {{.name}} to come online ...", out.V{"name": n.Name}) cp, err := config.PrimaryControlPlane(cfg) if err != nil { return err } - if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { - return err + + if n.ControlPlane { + if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { + return err + } } ip := cp.IP @@ -270,8 +273,10 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } } - if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { - return err + if n.ControlPlane { + if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { + return err + } } c, err := k.client(ip, port) @@ -369,7 +374,6 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) - fmt.Println(joinCmd) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) From 
9f82d6855b31936c80b1addff468da917db2c7a1 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 26 Feb 2020 15:51:14 -0800 Subject: [PATCH 18/42] actually cache images on cluster startup --- cmd/minikube/cmd/cache.go | 4 ++-- pkg/minikube/cluster/setup.go | 4 ++++ pkg/minikube/config/config.go | 14 ++++++++++++++ pkg/minikube/node/cache.go | 13 ------------- pkg/minikube/node/start.go | 2 +- 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index eb91371984..f91f508b00 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,10 +19,10 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" ) // cacheImageConfigKey is the config field name used to store which images we have previously cached @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err := node.CacheAndLoadImagesInConfig() + err := config.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index f09a8188c3..1d194ca900 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -79,6 +79,10 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str exit.WithError("Error starting cluster", err) } + if err := config.CacheAndLoadImagesInConfig(); err != nil { + out.T(out.FailureType, "Unable to load cached images from config file.") + } + // enable addons, both old and new! 
if existingAddons != nil { addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index bf7e0bdd28..c0ab5d0976 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/machine" ) const ( @@ -201,3 +202,16 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo } return ioutil.WriteFile(path, contents, 0644) } + +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 03b854a99e..41192d523f 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -116,16 +116,3 @@ func imagesInConfigFile() ([]string, error) { } return []string{}, nil } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. 
-func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index efea8548fb..8ce658c836 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -67,7 +67,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to update node", err) } - if err := CacheAndLoadImagesInConfig(); err != nil { + if err := config.CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } From 2bf3b9a9f93254fb1d46172992e70a757fcaebbf Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 11:40:51 -0800 Subject: [PATCH 19/42] move caching code to cluster --- cmd/minikube/cmd/cache.go | 4 +-- cmd/minikube/cmd/kubectl.go | 4 +-- pkg/minikube/{node => cluster}/cache.go | 33 +++++++++++++++++++------ pkg/minikube/cluster/setup.go | 2 +- pkg/minikube/config/config.go | 14 ----------- pkg/minikube/node/node.go | 13 +++++----- pkg/minikube/node/start.go | 8 +++--- 7 files changed, 41 insertions(+), 37 deletions(-) rename pkg/minikube/{node => cluster}/cache.go (79%) diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index f91f508b00..ab1f075853 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,7 +19,7 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err 
:= config.CacheAndLoadImagesInConfig() + err := cluster.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index ebceeb9468..342b9b283d 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -25,10 +25,10 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,7 +59,7 @@ minikube kubectl -- get pods --namespace kube-system`, version = cc.KubernetesConfig.KubernetesVersion } - path, err := node.CacheKubectlBinary(version) + path, err := cluster.CacheKubectlBinary(version) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/cluster/cache.go similarity index 79% rename from pkg/minikube/node/cache.go rename to pkg/minikube/cluster/cache.go index 41192d523f..04e7c8b6c6 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package node +package cluster import ( "os" @@ -33,8 +33,13 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) -// beginCacheRequiredImages caches images required for kubernetes version in the background -func beginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string) { +const ( + cacheImages = "cache-images" + cacheImageConfigKey = "cache" +) + +// BeginCacheRequiredImages caches images required for kubernetes version in the background +func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string) { if !viper.GetBool("cache-images") { return } @@ -44,7 +49,8 @@ func beginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVers }) } -func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { +// HandleDownloadOnly handles the download-only parameter +func HandleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. if !viper.GetBool("download-only") { return @@ -55,7 +61,7 @@ func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) { if _, err := CacheKubectlBinary(k8sVersion); err != nil { exit.WithError("Failed to cache kubectl", err) } - waitCacheRequiredImages(cacheGroup) + WaitCacheRequiredImages(cacheGroup) if err := saveImagesToTarFromConfig(); err != nil { exit.WithError("Failed to cache images to tar", err) } @@ -79,8 +85,8 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// waitCacheRequiredImages blocks until the required images are all cached. -func waitCacheRequiredImages(g *errgroup.Group) { +// WaitCacheRequiredImages blocks until the required images are all cached. 
+func WaitCacheRequiredImages(g *errgroup.Group) { if !viper.GetBool(cacheImages) { return } @@ -102,6 +108,19 @@ func saveImagesToTarFromConfig() error { return image.SaveToDir(images, constants.ImageCacheDir) } +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + func imagesInConfigFile() ([]string, error) { configFile, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 1d194ca900..931a21f812 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -79,7 +79,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str exit.WithError("Error starting cluster", err) } - if err := config.CacheAndLoadImagesInConfig(); err != nil { + if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index c0ab5d0976..bf7e0bdd28 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -26,7 +26,6 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" ) const ( @@ -202,16 +201,3 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo } return ioutil.WriteFile(path, contents, 0644) } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. 
-func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 1f2147cbc5..940661bc95 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -25,14 +25,13 @@ import ( "k8s.io/minikube/pkg/minikube/machine" ) +// TODO: Share these between cluster and node packages const ( - cacheImages = "cache-images" - waitUntilHealthy = "wait" - cacheImageConfigKey = "cache" - containerRuntime = "container-runtime" - mountString = "mount-string" - createMount = "mount" - waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + containerRuntime = "container-runtime" + mountString = "mount-string" + createMount = "mount" + waitTimeout = "wait-timeout" ) // Add adds a new node config to an existing cluster. diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 8ce658c836..f770b0c88d 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -32,7 +32,7 @@ import ( func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. var cacheGroup errgroup.Group - beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) // Why do we need this? if cc.Downloader == nil { @@ -50,11 +50,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo k8sVersion := cc.KubernetesConfig.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. 
- handleDownloadOnly(&cacheGroup, k8sVersion) + cluster.HandleDownloadOnly(&cacheGroup, k8sVersion) // configure the runtime (docker, containerd, crio) cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) - waitCacheRequiredImages(&cacheGroup) + cluster.WaitCacheRequiredImages(&cacheGroup) configureMounts() @@ -67,7 +67,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to update node", err) } - if err := config.CacheAndLoadImagesInConfig(); err != nil { + if err := cluster.CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") } From 0cc0a25c28e46037d523a258e7021ba6a66b509e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 14:37:15 -0800 Subject: [PATCH 20/42] passing correct machine name around --- cmd/minikube/cmd/delete.go | 18 ++++++++++-------- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 6 +++--- pkg/minikube/cluster/setup.go | 4 ++++ 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 9169a4fa71..892a851c67 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -235,14 +235,16 @@ func deleteProfile(profile *pkg_config.Profile) error { out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) } - for _, n := range cc.Nodes { - if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { - switch errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - glog.Infof("%s cluster does not exist. 
Proceeding ahead with cleanup.", profile.Name) - default: - out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) - out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + if cc != nil { + for _, n := range cc.Nodes { + if err = machine.DeleteHost(api, driver.MachineName(profile.Name, n.Name)); err != nil { + switch errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name) + default: + out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) + out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name}) + } } } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 1316612771..14aed83c6c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -268,7 +268,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time port := cp.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, cfg.Name, port) + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg.Name, n.Name), port) if err != nil { return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } @@ -338,9 +338,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { port := n.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, cfg.Name, port) + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg.Name, n.Name), port) if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) + return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg.Name, n.Name)) } } 
client, err := k.client(ip, port) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 931a21f812..62b57730b1 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -29,6 +29,7 @@ import ( "github.com/docker/machine/libmachine/host" "github.com/golang/glog" "github.com/spf13/viper" + "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -57,6 +58,9 @@ const ( // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { + var cacheGroup errgroup.Group + BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) + _, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() From a349b865d83f514e11499a8c410ba24e93a28350 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 27 Feb 2020 15:09:05 -0800 Subject: [PATCH 21/42] correct machine name for selectDriver --- cmd/minikube/cmd/start.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 194c1c988a..0db8d68826 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -534,7 +534,11 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { return } - machineName := viper.GetString(config.MachineProfile) + cp, err := config.PrimaryControlPlane(*existing) + if err != nil { + glog.Warningf("selectDriver PrimaryControlPlane: %v", err) + } + machineName := driver.MachineName(viper.GetString(config.MachineProfile), cp.Name) h, err := api.Load(machineName) if err != nil { glog.Warningf("selectDriver api.Load: %v", err) From 05116abb1fedc501788a5cd80704d803329a05b8 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 9 
Mar 2020 19:29:29 -0700 Subject: [PATCH 22/42] more fallout for the merge-pocalypse --- cmd/minikube/cmd/node_add.go | 12 +++++++----- cmd/minikube/cmd/ssh.go | 8 ++++++-- cmd/minikube/cmd/start.go | 2 +- pkg/minikube/cluster/cache.go | 3 ++- pkg/minikube/cluster/setup.go | 4 ++-- pkg/minikube/node/start.go | 12 +++--------- 6 files changed, 21 insertions(+), 20 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 3995e7a6d7..9247d872ed 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -43,17 +43,19 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } - //name := profile + strconv.Itoa(len(mc.Nodes)+1) name := fmt.Sprintf("m%d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) - n, err := node.Add(cc, name, cp, worker, "", profile) - if err != nil { - exit.WithError("Error adding node to cluster", err) + // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
+ n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - _, err = node.Start(*cc, *n, false, nil) + err = node.Add(cc, n) if err != nil { exit.WithError("Error adding node to cluster", err) } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 9da3698ba9..9c41e8c275 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -58,7 +58,11 @@ var sshCmd = &cobra.Command{ } n = &cp } else { - n = node.Retrieve(cc, nodeName) + n, _, err = node.Retrieve(cc, nodeName) + if err != nil { + out.FailureT("Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) + exit.WithError("", err) + } } host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, *n)) if err != nil { @@ -73,7 +77,7 @@ var sshCmd = &cobra.Command{ ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, *cc, cp, args) + err = machine.CreateSSHShell(api, *cc, *n, args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 0fd8f1c0c0..ba0d393f46 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -348,7 +348,7 @@ func runStart(cmd *cobra.Command, args []string) { // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
- if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil { + if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { exit.WithError("Failed to save config", err) } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index e918eb454e..37fb7ae7cd 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -51,7 +51,8 @@ func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVers }) } -func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { +// HandleDownloadOnly caches appropariate binaries and images +func HandleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. if !viper.GetBool("download-only") { return diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index 62b57730b1..8b1af87a55 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -89,7 +89,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // enable addons, both old and new! if existingAddons != nil { - addons.Start(viper.GetString(config.MachineProfile), existingAddons, config.AddonList) + addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) } // special ops for none , like change minikube directory. 
@@ -111,7 +111,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg.Name, n.Name) + bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) if err != nil { exit.WithError("Failed to get bootstrapper", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 3e88a23041..2c7eb18004 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -25,24 +25,18 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util" ) // Start spins up a guest and starts the kubernetes node. func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { // Now that the ISO is downloaded, pull images in the background while the VM boots. - var cacheGroup errgroup.Group + var cacheGroup, kicGroup errgroup.Group cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion) - // Why do we need this? - if cc.Downloader == nil { - cc.Downloader = util.DefaultDownloader{} - } - runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, n.Name) + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) if err != nil { exit.WithError("Failed to get bootstrapper", err) } @@ -50,7 +44,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo k8sVersion := cc.KubernetesConfig.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. 
- cluster.HandleDownloadOnly(&cacheGroup, k8sVersion) + cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) // configure the runtime (docker, containerd, crio) cr := configureRuntimes(runner, driverName, cc.KubernetesConfig) showVersionInfo(k8sVersion, cr) From ef93b291ca278d8654c29e7809098606ee441f7d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 10 Mar 2020 13:46:06 -0700 Subject: [PATCH 23/42] fix build failures --- pkg/minikube/machine/cluster_test.go | 2 -- pkg/minikube/node/node.go | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 2f7cd5b15b..e1f7d5a390 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -108,8 +108,6 @@ func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - n := defaultNodeConfig - // Create an initial host. ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index fb12bfca75..1b86069ee9 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -32,6 +32,7 @@ const ( mountString = "mount-string" createMount = "mount" waitTimeout = "wait-timeout" + imageRepository = "image-repository" ) // Add adds a new node config to an existing cluster. 
From 3ed818c48860e6777a559f14f30a8a029d5357a9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 12 Mar 2020 15:40:13 -0700 Subject: [PATCH 24/42] cosmetic fixes --- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/node_start.go | 5 +---- cmd/minikube/cmd/start.go | 16 +++++++------- cmd/minikube/cmd/status.go | 11 ++++++++-- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 16 +++++++++----- pkg/minikube/node/node.go | 4 ++-- pkg/minikube/node/start.go | 23 ++++++++++++--------- 7 files changed, 45 insertions(+), 32 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 9247d872ed..a450684ec3 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -43,7 +43,7 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } - name := fmt.Sprintf("m%d", len(cc.Nodes)+1) + name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 92f866f99e..658ee092bf 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,10 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - err = node.Start(*cc, *n, nil) - if err != nil { - out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) - } + node.Start(*cc, *n, nil) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 5df3d73d88..9b87afbd94 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -315,7 +315,7 @@ func runStart(cmd *cobra.Command, args []string) { } k8sVersion := getKubernetesVersion(existing) - mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) + cc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) if err != nil { exit.WithError("Failed to generate config", err) } @@ -331,7 +331,7 @@ func runStart(cmd *cobra.Command, args []string) { if err 
!= nil { exit.WithError("Failed to cache ISO", err) } - mc.MinikubeISO = url + cc.MinikubeISO = url } if viper.GetBool(nativeSSH) { @@ -350,16 +350,16 @@ func runStart(cmd *cobra.Command, args []string) { // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { + if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { exit.WithError("Failed to save config", err) } - kubeconfig, err := cluster.InitialSetup(mc, n, existingAddons) + kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) } - if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { glog.Errorf("kubectl info: %v", err) } @@ -369,14 +369,14 @@ func runStart(cmd *cobra.Command, args []string) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") } for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("%s%d", n.Name, i+1) + nodeName := fmt.Sprintf("m%02d", i+1) n := config.Node{ Name: nodeName, Worker: true, ControlPlane: false, - KubernetesVersion: mc.KubernetesConfig.KubernetesVersion, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - err := node.Add(&mc, n) + err := node.Add(&cc, n) if err != nil { exit.WithError("adding node", err) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 012db5a980..6342706bd8 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -150,8 +150,15 @@ func exitCode(st *Status) int { func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { - profile := strings.Split(name, "-")[0] - node := strings.Split(name, "-")[1] + var profile, node string + + if 
strings.Contains(name, "-") { + profile = strings.Split(name, "-")[0] + node = strings.Split(name, "-")[1] + } else { + profile = name + node = name + } st := &Status{ Name: node, diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 0f37e5ae11..2b28ed6f99 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -36,8 +36,8 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { - k8s := mc.KubernetesConfig +func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { + k8s := cc.KubernetesConfig version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return nil, errors.Wrap(err, "parsing kubernetes version") @@ -50,7 +50,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N } // In case of no port assigned, use default - cp, err := config.PrimaryControlPlane(&mc) + cp, err := config.PrimaryControlPlane(&cc) if err != nil { return nil, errors.Wrap(err, "getting control plane") } @@ -64,6 +64,11 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N return nil, errors.Wrap(err, "generating extra component config for kubeadm") } + controlPlaneEndpoint := cp.IP + if n.ControlPlane { + controlPlaneEndpoint = "localhost" + } + opts := struct { CertDir string ServiceCIDR string @@ -91,7 +96,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), ClusterName: k8s.ClusterName, - NodeName: cp.Name, + NodeName: n.Name, CRISocket: r.SocketPath(), ImageRepository: k8s.ImageRepository, ComponentOptions: componentOpts, @@ -101,7 +106,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r 
cruntime.Manager, n config.N NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: "localhost", + ControlPlaneAddress: controlPlaneEndpoint, } if k8s.ServiceCIDR != "" { @@ -126,6 +131,7 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N if err := configTmpl.Execute(&b, opts); err != nil { return nil, err } + fmt.Printf("%s OPTS=%+v\n", n.Name, opts) glog.Infof("kubeadm config:\n%s\n", b.String()) return b.Bytes(), nil } diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 1b86069ee9..3dba42d1f0 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -43,8 +43,8 @@ func Add(cc *config.ClusterConfig, n config.Node) error { return err } - err = Start(*cc, n, nil) - return err + Start(*cc, n, nil) + return nil } // Delete stops and deletes the given node from the given cluster diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0b2d4d3708..80fc09a52c 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -28,12 +28,12 @@ import ( ) // Start spins up a guest and starts the kubernetes node. -func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) error { +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
var cacheGroup, kicGroup errgroup.Group cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n) + runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) @@ -69,13 +69,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo out.T(out.FailureType, "Unable to load cached images from config file.") } - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - } - err = bs.SetupCerts(cc.KubernetesConfig, n) if err != nil { exit.WithError("setting up certs", err) @@ -93,5 +86,15 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("generating join token", err) } - return bs.JoinCluster(cc, n, joinCmd) + err = bs.JoinCluster(cc, n, joinCmd) + if err != nil { + exit.WithError("joining cluster", err) + } + + /*// Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + }*/ } From b6ab2931d69b789d1a07e0e8930dfd444a4248c4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 11:29:46 -0700 Subject: [PATCH 25/42] run all necessary steps on all nodes --- pkg/minikube/bootstrapper/bootstrapper.go | 3 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 4 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 41 +++++++++++++------- pkg/minikube/node/start.go | 24 +++++------- 4 files changed, 42 insertions(+), 30 deletions(-) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go 
b/pkg/minikube/bootstrapper/bootstrapper.go index f250423833..1dac315e80 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -42,7 +42,8 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.KubernetesConfig) (string, error) + SetupNode(config.ClusterConfig) error + GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 2b28ed6f99..dacce16d43 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -65,9 +65,9 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N } controlPlaneEndpoint := cp.IP - if n.ControlPlane { + /*if n.ControlPlane { controlPlaneEndpoint = "localhost" - } + }*/ opts := struct { CertDir string diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index ca207f63d9..a5143a7587 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -200,22 +200,17 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } + err = k.SetupNode(cfg) + if err != nil { + return errors.Wrap(err, "setting up node") + } + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) } - if cfg.Driver == driver.Docker { - if err := k.applyKicOverlay(cfg); err != nil { - return errors.Wrap(err, "apply kic overlay") - } - } - - if err := k.applyNodeLabels(cfg); err != nil { - glog.Warningf("unable to apply node labels: %v", err) - } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } @@ -227,6 +222,20 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } +func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { + if cfg.Driver == driver.Docker { + if err := k.applyKicOverlay(cfg); err != nil { + return errors.Wrap(err, "apply kic overlay") + } + } + + if err := k.applyNodeLabels(cfg); err != nil { + glog.Warningf("unable to apply node labels: %v", err) + } + + return nil +} + // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { @@ -384,14 +393,20 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run -func (k *Bootstrapper) GenerateToken(k8s config.KubernetesConfig) (string, error) { - tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(k8s.KubernetesVersion))) +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) r, err := k.c.RunCmd(tokenCmd) if err != nil { return "", errors.Wrap(err, "generating bootstrap token") } + + /*cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + return "", errors.Wrap(err, "getting primary control plane") + }*/ joinCmd := r.Stdout.String() - 
joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(k8s.KubernetesVersion), 1) + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + //joinCmd = strings.ReplaceAll(joinCmd, "localhost", cp.IP) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) return joinCmd, nil diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 80fc09a52c..1550390771 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -24,7 +24,6 @@ import ( "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/out" ) // Start spins up a guest and starts the kubernetes node. @@ -66,14 +65,17 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } if err := cluster.CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") + exit.WithError("Unable to load cached images from config file.", err) } - err = bs.SetupCerts(cc.KubernetesConfig, n) - if err != nil { + if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { exit.WithError("setting up certs", err) } + if err = bs.SetupNode(cc); err != nil { + exit.WithError("Failed to setup node", err) + } + cp, err := config.PrimaryControlPlane(&cc) if err != nil { exit.WithError("Getting primary control plane", err) @@ -82,19 +84,13 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo if err != nil { exit.WithError("Getting bootstrapper", err) } - joinCmd, err := cpBs.GenerateToken(cc.KubernetesConfig) + + joinCmd, err := cpBs.GenerateToken(cc) if err != nil { exit.WithError("generating join token", err) } - err = bs.JoinCluster(cc, n, joinCmd) - if err != nil { + + if err = bs.JoinCluster(cc, n, joinCmd); err != nil { exit.WithError("joining cluster", err) } - - /*// Skip pre-existing, because we already 
waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - }*/ } From a9b73b8ba3f539832f26ec2051d62791c391a097 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 15:03:19 -0700 Subject: [PATCH 26/42] fixing up minikube start path --- cmd/minikube/cmd/start.go | 8 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 44 ++--- pkg/minikube/cluster/cache.go | 8 +- pkg/minikube/cluster/setup.go | 19 +- pkg/minikube/machine/cluster_test.go | 12 +- pkg/minikube/machine/start.go | 20 +- pkg/minikube/node/machine.go | 187 ------------------- pkg/minikube/node/start.go | 14 +- 8 files changed, 71 insertions(+), 241 deletions(-) delete mode 100644 pkg/minikube/node/machine.go diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 9b87afbd94..af7e32b078 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -359,10 +359,6 @@ func runStart(cmd *cobra.Command, args []string) { exit.WithError("Starting node", err) } - if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { - glog.Errorf("kubectl info: %v", err) - } - numNodes := viper.GetInt(nodes) if numNodes > 1 { if driver.IsKIC(driverName) { @@ -382,6 +378,10 @@ func runStart(cmd *cobra.Command, args []string) { } } } + + if err := showKubectlInfo(kubeconfig, k8sVersion, cc.Name); err != nil { + glog.Errorf("kubectl info: %v", err) + } } func updateDriver(driverName string) { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index a5143a7587..cb1d0d1058 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -222,6 +222,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } +// SetupNode runs commands that need to be on all nodes func (k *Bootstrapper) SetupNode(cfg 
config.ClusterConfig) error { if cfg.Driver == driver.Docker { if err := k.applyKicOverlay(cfg); err != nil { @@ -339,33 +340,32 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } - for _, n := range cfg.Nodes { - ip := n.IP - port := n.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg, n), port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, n)) - } - } - client, err := k.client(ip, port) + cp, err := config.PrimaryControlPlane(&cfg) + ip := cp.IP + port := cp.Port + if driver.IsKIC(cfg.Driver) { + ip = oci.DefaultBindIPV4 + port, err = oci.HostPortBinding(cfg.Driver, driver.MachineName(cfg, cp), port) if err != nil { - return errors.Wrap(err, "getting k8s client") + return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, cp)) } + } + client, err := k.client(ip, port) + if err != nil { + return errors.Wrap(err, "getting k8s client") + } - if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { - return errors.Wrap(err, "system pods") - } + if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "system pods") + } - // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) - } + // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. 
+ if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { + return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) } return nil } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index 809e87580b..e7563d1d78 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -40,8 +40,8 @@ const ( cacheImageConfigKey = "cache" ) -// BeginCacheRequiredImages caches images required for kubernetes version in the background -func BeginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { +// BeginCacheKubernetesImages caches images required for kubernetes version in the background +func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { if download.PreloadExists(k8sVersion, cRuntime) { g.Go(func() error { glog.Info("Caching tarball of preloaded images") @@ -96,8 +96,8 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// beginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available -func beginDownloadKicArtifacts(g *errgroup.Group) { +// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available +func BeginDownloadKicArtifacts(g *errgroup.Group) { glog.Info("Beginning downloading kic artifacts") g.Go(func() error { glog.Infof("Downloading %s to local daemon", kic.BaseImage) diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index ad2fc408ff..cd653bbdbd 100644 --- 
a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -71,7 +71,17 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str bs := setupKubeAdm(machineAPI, cc, n) var cacheGroup errgroup.Group - BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + if !driver.BareMetal(cc.Driver) { + BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + BeginDownloadKicArtifacts(&kicGroup) + } + + HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + WaitDownloadKicArtifacts(&kicGroup) // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... ") @@ -189,12 +199,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. // startHost starts a new minikube host using a VM or None func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(n.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) + host, exists, err := machine.StartHost(api, mc, n) if err != nil { exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 32dcc0ec0f..c4c26d27c2 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -130,7 +130,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. 
- h, err := StartHost(api, mc, n) + h, _, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -164,7 +164,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { if err != ErrorMachineNotExist { t.Fatalf("Error starting host: %v", err) @@ -177,7 +177,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n.Name = h.Name // Second call. This should pass without calling Create because the host exists already. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -210,7 +210,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -238,7 +238,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -272,7 +272,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, cfg, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 8f44a12b66..368af5b3af 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -62,28 +62,32 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, bool, error) { + machineName := driver.MachineName(cfg, n) + // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(n.Name) + releaser, err := acquireMachinesLock(machineName) if err != nil { - return nil, errors.Wrap(err, "boot lock") + return nil, false, errors.Wrap(err, "boot lock") } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", machineName, time.Since(start)) releaser.Release() }() - exists, err := api.Exists(n.Name) + exists, err := api.Exists(machineName) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", n.Name) + return nil, false, errors.Wrapf(err, "exists: %s", machineName) } if !exists { glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) - return createHost(api, cfg, n) + h, err := createHost(api, cfg, n) + return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg, n) + h, err := fixHost(api, cfg, n) + return h, exists, err } func engineOptions(cfg config.ClusterConfig) *engine.Options { diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 483131515a..0000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "-type=ns", "kubernetes.io")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. 
- if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - // try with the older "host" command, instead of the newer "nslookup" - if _, err = r.RunCmd(exec.Command("host", "kubernetes.io")); err != nil { - out.WarningT("Node may be unable to resolve external DNS records") - } - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) - } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1550390771..28c6509c49 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -23,14 +23,22 @@ import ( "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" ) // Start spins up a guest and starts the kubernetes node. func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { // Now that the ISO is downloaded, pull images in the background while the VM boots. 
- var cacheGroup, kicGroup errgroup.Group - cluster.BeginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + cluster.BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + cluster.BeginDownloadKicArtifacts(&kicGroup) + } runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) defer mAPI.Close() @@ -40,7 +48,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("Failed to get bootstrapper", err) } - k8sVersion := cc.KubernetesConfig.KubernetesVersion + k8sVersion := n.KubernetesVersion driverName := cc.Driver // exits here in case of --download-only option. cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) From d98ebcfb687b530b31b7a7c6d15bd461c48da1c9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 15:32:14 -0700 Subject: [PATCH 27/42] lint --- cmd/minikube/cmd/stop.go | 34 ++++++++++++++------ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 ++ pkg/minikube/node/node.go | 3 -- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index c3586b8492..005284a022 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -17,7 +17,12 @@ limitations under the License. 
package cmd import ( + "time" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/mcnerror" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" @@ -27,6 +32,7 @@ import ( "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/util/retry" ) // stopCmd represents the stop command @@ -72,16 +78,26 @@ func runStop(cmd *cobra.Command, args []string) { func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { nonexistent := false - - // TODO replace this back with expo backoff - for _, n := range cluster.Nodes { - err := machine.StopHost(api, driver.MachineName(cluster, n)) - if err != nil { - exit.WithError("Unable to stop VM", err) + stop := func() (err error) { + machineName := driver.MachineName(cluster, n) + err = machine.StopHost(api, machineName) + if err == nil { + return nil } - /*if err := retry.Expo(fn, 5*time.Second, 3*time.Minute, 5); err != nil { - exit.WithError("Unable to stop VM", err) - }*/ + glog.Warningf("stop host returned error: %v", err) + + switch err := errors.Cause(err).(type) { + case mcnerror.ErrHostDoesNotExist: + out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) + nonexistent = true + return nil + default: + return err + } + } + + if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { + exit.WithError("Unable to stop VM", err) } return nonexistent diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 13d8f6e921..ed458c16b2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -346,6 +346,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return 
errors.Wrap(err, "getting control plane") + } ip := cp.IP port := cp.Port if driver.IsKIC(cfg.Driver) { diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 3dba42d1f0..41d518f3f2 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -27,12 +27,9 @@ import ( // TODO: Share these between cluster and node packages const ( - waitUntilHealthy = "wait" containerRuntime = "container-runtime" mountString = "mount-string" createMount = "mount" - waitTimeout = "wait-timeout" - imageRepository = "image-repository" ) // Add adds a new node config to an existing cluster. From c3b56b646665455ff93a1e2309296850466ecd7e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 13 Mar 2020 18:08:10 -0700 Subject: [PATCH 28/42] let's rearrange a bunch of code --- cmd/minikube/cmd/start.go | 10 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 7 +- pkg/minikube/bootstrapper/bsutil/kubelet.go | 3 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 +- pkg/minikube/cluster/cache.go | 2 +- pkg/minikube/cluster/setup.go | 96 ++++++++++++++++---- pkg/minikube/driver/driver.go | 2 +- pkg/minikube/node/config.go | 45 --------- pkg/minikube/node/start.go | 2 +- 9 files changed, 87 insertions(+), 83 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6dc808860c..8ad78ffc6e 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -326,7 +326,7 @@ func runStart(cmd *cobra.Command, args []string) { return } - if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { + if driver.IsVM(driverName) { url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) if err != nil { exit.WithError("Failed to cache ISO", err) @@ -348,12 +348,6 @@ func runStart(cmd *cobra.Command, args []string) { } } - // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. 
- // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { - exit.WithError("Failed to save config", err) - } - kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) if err != nil { exit.WithError("Starting node", err) @@ -361,7 +355,7 @@ func runStart(cmd *cobra.Command, args []string) { numNodes := viper.GetInt(nodes) if numNodes > 1 { - if driver.IsKIC(driverName) { + if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") } for i := 1; i < numNodes; i++ { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 2f21d3563e..fa2e120acb 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -65,11 +65,6 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N return nil, errors.Wrap(err, "generating extra component config for kubeadm") } - controlPlaneEndpoint := cp.IP - /*if n.ControlPlane { - controlPlaneEndpoint = "localhost" - }*/ - opts := struct { CertDir string ServiceCIDR string @@ -107,7 +102,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: controlPlaneEndpoint, + ControlPlaneAddress: cp.IP, } if k8s.ServiceCIDR != "" { diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 8ec9d01fc6..ce161b41da 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -26,6 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" 
"k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -60,7 +61,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage extraOpts["node-ip"] = cp.IP } if nc.Name != "" { - extraOpts["hostname-override"] = nc.Name + extraOpts["hostname-override"] = driver.MachineName(mc, nc) } pauseImage := images.Pause(version, k8s.ImageRepository) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index ed458c16b2..09d23e2703 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -202,8 +202,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - err = k.SetupNode(cfg) - if err != nil { + if err = k.SetupNode(cfg); err != nil { return errors.Wrap(err, "setting up node") } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go index 56a58a9246..6fcf303f27 100644 --- a/pkg/minikube/cluster/cache.go +++ b/pkg/minikube/cluster/cache.go @@ -52,7 +52,7 @@ func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) } - if !viper.GetBool("cache-images") { + if !viper.GetBool(cacheImages) { return } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index cd653bbdbd..e67a4e6ca3 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/blang/semver" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/golang/glog" @@ -37,10 +38,12 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" 
"k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" @@ -54,13 +57,44 @@ const ( embedCerts = "embed-certs" keepContext = "keep-context" imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { + var kicGroup errgroup.Group + if driver.IsKIC(cc.Driver) { + BeginDownloadKicArtifacts(&kicGroup) + } + + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + // Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
+ if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { + exit.WithError("Failed to save config", err) + } + + HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + WaitDownloadKicArtifacts(&kicGroup) + + mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) defer machineAPI.Close() + // wait for preloaded tarball to finish downloading before configuring runtimes + WaitCacheRequiredImages(&cacheGroup) + + sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) + if err != nil { + return nil, err + } + + // configure the runtime (docker, containerd, crio) + cr := ConfigureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) + // Must be written before bootstrap, otherwise health checks may flake due to stale IP kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) if err != nil { @@ -70,28 +104,13 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str // setup kubeadm (must come after setupKubeconfig) bs := setupKubeAdm(machineAPI, cc, n) - var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - - var kicGroup errgroup.Group - if driver.IsKIC(cc.Driver) { - BeginDownloadKicArtifacts(&kicGroup) - } - - HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) - WaitDownloadKicArtifacts(&kicGroup) - // pull images or restart cluster out.T(out.Launch, "Launching Kubernetes ... 
") err = bs.StartCluster(cc) if err != nil { - /*config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: mRunner, ImageRepository: cc.KubernetesConfig.ImageRepository, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion} - cr, err := cruntime.New(config) - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))*/ - exit.WithError("Error starting cluster", err) + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) } + //configureMounts() if err := CacheAndLoadImagesInConfig(); err != nil { out.T(out.FailureType, "Unable to load cached images from config file.") @@ -119,6 +138,47 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str } +// ConfigureRuntimes does what needs to happen to get a runtime going. +func ConfigureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: viper.GetString(containerRuntime), + Runner: runner, ImageRepository: k8s.ImageRepository, + KubernetesVersion: kv, + } + cr, err := cruntime.New(co) + if err != nil { + exit.WithError("Failed runtime", err) + } + + disableOthers := true + if driver.BareMetal(drvName) { + disableOthers = false + } + + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(drvName) { + if err := cr.Preload(k8s); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + // setupKubeAdm adds any requested files into the VM before Kubernetes is started func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..e064f70799 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -112,7 +112,7 @@ func IsMock(name string) bool { // IsVM checks if the driver is a VM func IsVM(name string) bool { - if IsKIC(name) || IsMock(name) || BareMetal(name) { + if IsKIC(name) || BareMetal(name) { return false } return true diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index da74bce3db..ef0f66dc12 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -23,62 +23,17 @@ import ( "path/filepath" "strconv" - "github.com/blang/semver" "github.com/golang/glog" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) -// configureRuntimes does what needs to happen to get a runtime going. 
-func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 599d4621aa..0a5e3fc095 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -64,7 +64,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo } // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(runner, driverName, cc.KubernetesConfig, sv) + cr := cluster.ConfigureRuntimes(runner, driverName, 
cc.KubernetesConfig, sv) showVersionInfo(k8sVersion, cr) configureMounts() From 577dfa339339d194f55a83b27bedee1ed0c46130 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 11:53:07 -0700 Subject: [PATCH 29/42] it works again --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 24 ++++++++++--------- .../bootstrapper/bsutil/kubeadm_test.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 4 ++-- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index fa2e120acb..4004ac2e16 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -37,7 +38,7 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { +func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) { k8s := cc.KubernetesConfig version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { @@ -87,19 +88,20 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.N CertDir: vmpath.GuestKubernetesCertsDir, ServiceCIDR: constants.DefaultServiceCIDR, PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm), - AdvertiseAddress: cp.IP, + AdvertiseAddress: n.IP, APIServerPort: nodePort, KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), - ClusterName: k8s.ClusterName, - NodeName: n.Name, - CRISocket: r.SocketPath(), - ImageRepository: k8s.ImageRepository, - ComponentOptions: componentOpts, - FeatureArgs: kubeadmFeatureArgs, - 
NoTaintMaster: false, // That does not work with k8s 1.12+ - DNSDomain: k8s.DNSDomain, - NodeIP: n.IP, + ClusterName: cc.Name, + //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine + NodeName: driver.MachineName(cc, n), + CRISocket: r.SocketPath(), + ImageRepository: k8s.ImageRepository, + ComponentOptions: componentOpts, + FeatureArgs: kubeadmFeatureArgs, + NoTaintMaster: false, // That does not work with k8s 1.12+ + DNSDomain: k8s.DNSDomain, + NodeIP: n.IP, // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files ControlPlaneAddress: cp.IP, diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 4c366bd96b..806359513a 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -129,7 +129,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } @@ -210,7 +210,7 @@ func TestGenerateKubeadmYAML(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 73ec4e4ced..11b2e4d426 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ 
b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -386,7 +386,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC }() // Join the master by specifying its token - joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, n.Name) + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, driver.MachineName(cc, n)) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) @@ -473,7 +473,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { // UpdateNode updates a node. func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, n) + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } From c1c26538797f67a4437f8dd6a572260676a2a8e5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 12:25:59 -0700 Subject: [PATCH 30/42] fix unit tests --- .../bootstrapper/bsutil/ktmpl/v1beta2.go | 2 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 1 - .../bootstrapper/bsutil/kubeadm_test.go | 20 +++++++++---------- .../testdata/v1.11/containerd-api-port.yaml | 2 +- .../v1.11/containerd-pod-network-cidr.yaml | 2 +- .../bsutil/testdata/v1.11/containerd.yaml | 2 +- .../testdata/v1.11/crio-options-gates.yaml | 2 +- .../bsutil/testdata/v1.11/crio.yaml | 2 +- .../bsutil/testdata/v1.11/default.yaml | 2 +- .../testdata/v1.11/image-repository.yaml | 2 +- .../bsutil/testdata/v1.11/options.yaml | 2 +- .../testdata/v1.12/containerd-api-port.yaml | 4 ++-- .../v1.12/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.12/containerd.yaml | 4 ++-- .../testdata/v1.12/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.12/crio.yaml | 4 ++-- .../bsutil/testdata/v1.12/default.yaml | 4 ++-- .../bsutil/testdata/v1.12/dns.yaml | 4 ++-- 
.../testdata/v1.12/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.12/options.yaml | 4 ++-- .../testdata/v1.13/containerd-api-port.yaml | 4 ++-- .../v1.13/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.13/containerd.yaml | 4 ++-- .../testdata/v1.13/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.13/crio.yaml | 4 ++-- .../bsutil/testdata/v1.13/default.yaml | 4 ++-- .../bsutil/testdata/v1.13/dns.yaml | 4 ++-- .../testdata/v1.13/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.13/options.yaml | 4 ++-- .../testdata/v1.14/containerd-api-port.yaml | 4 ++-- .../v1.14/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.14/containerd.yaml | 4 ++-- .../testdata/v1.14/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.14/crio.yaml | 4 ++-- .../bsutil/testdata/v1.14/default.yaml | 4 ++-- .../bsutil/testdata/v1.14/dns.yaml | 4 ++-- .../testdata/v1.14/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.14/options.yaml | 4 ++-- .../testdata/v1.15/containerd-api-port.yaml | 4 ++-- .../v1.15/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.15/containerd.yaml | 4 ++-- .../testdata/v1.15/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.15/crio.yaml | 4 ++-- .../bsutil/testdata/v1.15/default.yaml | 4 ++-- .../bsutil/testdata/v1.15/dns.yaml | 4 ++-- .../testdata/v1.15/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.15/options.yaml | 4 ++-- .../testdata/v1.16/containerd-api-port.yaml | 4 ++-- .../v1.16/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.16/containerd.yaml | 4 ++-- .../testdata/v1.16/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.16/crio.yaml | 4 ++-- .../bsutil/testdata/v1.16/default.yaml | 4 ++-- .../bsutil/testdata/v1.16/dns.yaml | 4 ++-- .../testdata/v1.16/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.16/options.yaml | 4 ++-- .../testdata/v1.17/containerd-api-port.yaml | 4 ++-- .../v1.17/containerd-pod-network-cidr.yaml | 4 ++-- 
.../bsutil/testdata/v1.17/containerd.yaml | 4 ++-- .../testdata/v1.17/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.17/crio.yaml | 4 ++-- .../bsutil/testdata/v1.17/default.yaml | 4 ++-- .../bsutil/testdata/v1.17/dns.yaml | 4 ++-- .../testdata/v1.17/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.17/options.yaml | 4 ++-- .../testdata/v1.18/containerd-api-port.yaml | 4 ++-- .../v1.18/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.18/containerd.yaml | 4 ++-- .../testdata/v1.18/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.18/crio.yaml | 4 ++-- .../bsutil/testdata/v1.18/default.yaml | 4 ++-- .../bsutil/testdata/v1.18/dns.yaml | 4 ++-- .../testdata/v1.18/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.18/options.yaml | 4 ++-- .../testdata/v1.19/containerd-api-port.yaml | 4 ++-- .../v1.19/containerd-pod-network-cidr.yaml | 4 ++-- .../bsutil/testdata/v1.19/containerd.yaml | 4 ++-- .../testdata/v1.19/crio-options-gates.yaml | 4 ++-- .../bsutil/testdata/v1.19/crio.yaml | 4 ++-- .../bsutil/testdata/v1.19/default.yaml | 4 ++-- .../bsutil/testdata/v1.19/dns.yaml | 4 ++-- .../testdata/v1.19/image-repository.yaml | 4 ++-- .../bsutil/testdata/v1.19/options.yaml | 4 ++-- pkg/minikube/node/node.go | 5 ++--- 84 files changed, 165 insertions(+), 167 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index c4718f9c98..c00835e8e7 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -56,7 +56,7 @@ kind: ClusterConfiguration {{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}} {{end -}}{{end -}} certificatesDir: {{.CertDir}} -clusterName: kubernetes +clusterName: mk controlPlaneEndpoint: {{.ControlPlaneAddress}}:{{.APIServerPort}} controllerManager: {} dns: diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 4004ac2e16..8b675ae644 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -129,7 +129,6 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana if err := configTmpl.Execute(&b, opts); err != nil { return nil, err } - fmt.Printf("%s OPTS=%+v\n", n.Name, opts) glog.Infof("kubeadm config:\n%s\n", b.String()) return b.Bytes(), nil } diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 806359513a..a2b53c3ea1 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -108,7 +108,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -174,15 +174,15 @@ func TestGenerateKubeadmYAML(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"default", "docker", false, config.ClusterConfig{}}, - {"containerd", "containerd", false, config.ClusterConfig{}}, - {"crio", "crio", false, config.ClusterConfig{}}, - {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", "containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", 
"containerd", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{Name: "mk"}}, + {"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}}, + {"crio", "crio", false, config.ClusterConfig{Name: "mk"}}, + {"options", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + {"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml index 7d94020c6f..ae79c8aa7a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 12345 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 
kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml index 30b1986325..1a4d370e84 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml index 4693643125..e179fbf4e3 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml index 5c2861101e..68429da7bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml index 7d383865f8..651706493c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml index 26fbfead4b..5b192e1cfd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff 
--git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml index ba34af30df..adf230658d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml index 0d821692e5..300ee2825f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml index 8ac889649f..9866d944d9 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml index 5fb536a9f5..c8e2fbb46a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml index a2e258468b..834021df94 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs 
-clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml index 6db4345453..3c8b8b41a8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml index e0b60901ab..d6154f4ecd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml index 595bd0c94c..e9dd51d811 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml index 04237f4db1..a49db3c29f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml index e4e9c885b2..8d90c3e212 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml index ee58cf2201..1788a1adb8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml index a719307679..770f46cc0f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml index be69a16ec7..326912679e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml index c195ffc2ba..08646f704f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml index f7fc9b5199..25d166e0dc 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml index d9bb198b8f..eb057faf76 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml index 0a1e7bab7b..d828d72006 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml index 3aa0b74754..5fe5d326bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 741ad12afb..64efcf3938 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml index 54abf05793..6ef28c1c8d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index df4740aaeb..97b4065593 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 513e1f803a..cf8a3e4728 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 
+clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index 1053c5c42f..3ef27c9b9f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index 117c9070bf..746eb9fb7d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 67c0df83a3..a4e2567756 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index c720ebac42..aedd2a9047 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 35aa4982b2..81980c953d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 3048061426..4e6bbead95 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml index 3a180ccafe..9a9a5c60f6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index 75a083a4ce..cacacc7e43 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk 
+controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index 587faaf4de..c78edc0119 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 680b24fe8d..47db96b5c2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index 4ac5254431..d68ef1b1f2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: 
kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index 2403f96063..1e79a74a1c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index 9e3d3e5088..f11df32d8b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index cf7d8c2964..d277ac59e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" 
certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 2f1d050a40..758f7b2f62 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml index ad749f03cc..15802a1859 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index ddc2d7cf74..0876e3bdde 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index adbc88e1d7..6ca53c67e9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index d401b50e81..0b87277ba2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index bf4ee2a96a..765a4b2398 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index 9b464ae194..1105d6fc3c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 140db5ca32..5b78859ead 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs 
-clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index c7623c0e0f..cb4d159683 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index f10bad3678..240d23984b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml index a8b4286a19..0ce3766fdb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 4eb28ddba9..c5ecd93bc5 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index 78f465b4ab..cae9608f04 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 179f18da35..d86d853915 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index b223f2a7c5..eef9a6c7ae 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index 5e8102e6ed..86f4d03bd9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index 676e6de52b..7b215a01b7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index 608569fb84..dba5ff15d2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index af5af1f022..77acbb9ed7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: 
kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml index b1f3d8214f..5abe34481e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index 317e578be8..a53b109047 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index 402ef57a02..0235b34b1f 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 74a7bd1536..d2907dcbc3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 7caa19fa2b..c921f54cdf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 80a20ba800..bd13212add 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index 7c1b7989db..ef954470c0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 6205c948bc..743cbb4e2d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: 
{} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index ba8872e7ce..1f5a6f8df8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index 6b1a12c922..4cbe5b28d8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index c92bd1a314..c2b2c89a2b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -24,8 +24,8 @@ apiServer: 
extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index 405c3354d2..21f5c1080a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index c1dd4916df..eb0abc25b0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index 9f76b719a7..692a5925a3 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index bc0db1cb07..e384cdda1f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 14cc7bb8b6..7b50680f2c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 
+clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index 7b60865e15..a134555f6a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 controllerManager: {} dns: type: CoreDNS diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 41d518f3f2..7458ff80a3 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -27,9 +27,8 @@ import ( // TODO: Share these between cluster and node packages const ( - containerRuntime = "container-runtime" - mountString = "mount-string" - createMount = "mount" + mountString = "mount-string" + createMount = "mount" ) // Add adds a new node config to an existing cluster. 
From 376111bae8c98b5ced850ed8e4745143bcf3e7d9 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 12:38:59 -0700 Subject: [PATCH 31/42] fix unit tests pt 2 --- pkg/minikube/bootstrapper/bsutil/kubelet_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 3019ee1f52..1af1dc525c 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -37,6 +37,7 @@ func TestGenerateKubeletConfig(t *testing.T) { { description: "old docker", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -62,6 +63,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- { description: "newest cri runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: "cri-o", @@ -87,6 +89,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode { description: "default containerd runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -112,6 +115,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "default containerd runtime with IP override", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -144,6 +148,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "docker with custom image repository", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: 
config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "docker", From f99d335fed4755e512e22da115a80121b3c45fbe Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 16 Mar 2020 15:36:05 -0700 Subject: [PATCH 32/42] fix docker driver --- cmd/minikube/cmd/start.go | 25 +++++++++++++------------ pkg/minikube/driver/driver.go | 8 ++++++++ pkg/provision/provision.go | 3 ++- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 3cfdc19f39..6b063a88ba 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -357,18 +357,19 @@ func runStart(cmd *cobra.Command, args []string) { if numNodes > 1 { if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") - } - for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("m%02d", i+1) - n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, - } - err := node.Add(&cc, n) - if err != nil { - exit.WithError("adding node", err) + } else { + for i := 1; i < numNodes; i++ { + nodeName := fmt.Sprintf("m%02d", i+1) + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + } + err := node.Add(&cc, n) + if err != nil { + exit.WithError("adding node", err) + } } } } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index e064f70799..170b99e71a 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -236,3 +236,11 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { } return fmt.Sprintf("%s-%s", cc.Name, n.Name) } + +// ClusterNameFromMachine retrieves the cluster name embedded in the machine name +func ClusterNameFromMachine(name string) string { + if strings.Contains(name, "-") { + return strings.Split(name, "-")[0] + } + return 
name +} diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index ff5f08fef8..fd84405266 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -39,6 +39,7 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/sshutil" ) @@ -195,7 +196,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster := strings.Split(name, "-")[0] + cluster := driver.ClusterNameFromMachine(name) c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") From 84939da8e3c27407dbf6c518682d636f6d21db24 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 17 Mar 2020 00:58:46 -0700 Subject: [PATCH 33/42] fix docker driver again --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 ++-- pkg/minikube/cluster/setup.go | 42 +++++++++++++++----- 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 11b2e4d426..6bd2403b71 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -202,16 +202,16 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - if err = k.SetupNode(cfg); err != nil { - return errors.Wrap(err, "setting up node") - } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. 
output: %q", rr.Output()) } + if err = k.SetupNode(cfg); err != nil { + return errors.Wrap(err, "setting up node") + } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go index e67a4e6ca3..513154ccce 100644 --- a/pkg/minikube/cluster/setup.go +++ b/pkg/minikube/cluster/setup.go @@ -29,10 +29,12 @@ import ( "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/command" @@ -198,18 +200,14 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) return bs } -func setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := h.Driver.GetURL() +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := apiServerURL(*h, *cc, *n) if err != nil { - exit.WithError("Failed to get driver URL", err) - } - if !driver.IsKIC(h.DriverName) { - addr = strings.Replace(addr, "tcp://", "https://", -1) - addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1) + exit.WithError("Failed to get API Server URL", err) } - if c.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1) + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) } kcs := &kubeconfig.Settings{ ClusterName: clusterName, @@ -228,6 +226,31 @@ func 
setupKubeconfig(h *host.Host, c *config.ClusterConfig, n *config.Node, clus return kcs, nil } +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname := "" + port := n.Port + var err error + if driver.IsKIC(h.DriverName) { + // for kic drivers we use 127.0.0.1 instead of node IP, + // because of Docker on MacOs limitations for reaching to container's IP. + hostname = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(h.DriverName, h.Name, port) + if err != nil { + return "", errors.Wrap(err, "host port binding") + } + } else { + hostname, err = h.Driver.GetIP() + if err != nil { + return "", errors.Wrap(err, "get ip") + } + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + // StartMachine starts a VM func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { m, err := machine.NewAPIClient() @@ -248,6 +271,7 @@ func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) } + // Save IP to config file for subsequent use node.IP = ip err = config.SaveNode(cfg, node) if err != nil { From 66a6f4e9060b85d3e4f6ec92c8f33cb32472a70c Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 17 Mar 2020 12:28:44 -0700 Subject: [PATCH 34/42] fix docker status --- cmd/minikube/cmd/node_add.go | 5 +++++ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index a450684ec3..1e28103142 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" @@ -43,6 +44,10 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("Error getting config", err) } + if driver.BareMetal(cc.Driver) { + out.ErrT(out.FailureType, "none driver does not support multi-node clusters") + } + name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 6bd2403b71..0863688ea8 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -276,10 +276,11 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time port := n.Port if driver.IsKIC(cfg.Driver) { ip = oci.DefaultBindIPV4 - port, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port) + p, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port) if err != nil { return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) } + port = p } if 
n.ControlPlane { if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { From a24aa5dff72fcff3dfe4be0a5620d8131d96fd88 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 15:04:56 -0700 Subject: [PATCH 35/42] dramatically simplify start code path --- cmd/minikube/cmd/cache.go | 4 +- cmd/minikube/cmd/kubectl.go | 4 +- cmd/minikube/cmd/node_start.go | 2 +- cmd/minikube/cmd/start.go | 6 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 +- pkg/minikube/cluster/cache.go | 168 ------- pkg/minikube/cluster/setup.go | 422 ------------------ pkg/minikube/node/node.go | 2 +- pkg/minikube/node/start.go | 443 +++++++++++++++++-- 9 files changed, 409 insertions(+), 650 deletions(-) delete mode 100644 pkg/minikube/cluster/cache.go delete mode 100644 pkg/minikube/cluster/setup.go diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index ab1f075853..eb91371984 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -19,10 +19,10 @@ package cmd import ( "github.com/spf13/cobra" cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/image" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" ) // cacheImageConfigKey is the config field name used to store which images we have previously cached @@ -75,7 +75,7 @@ var reloadCacheCmd = &cobra.Command{ Short: "reload cached images.", Long: "reloads images previously added using the 'cache add' subcommand", Run: func(cmd *cobra.Command, args []string) { - err := cluster.CacheAndLoadImagesInConfig() + err := node.CacheAndLoadImagesInConfig() if err != nil { exit.WithError("Failed to reload cached images", err) } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index e5520d8153..e24943a7d4 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -25,10 +25,10 @@ import ( "github.com/golang/glog" 
"github.com/spf13/cobra" "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,7 +59,7 @@ minikube kubectl -- get pods --namespace kube-system`, version = cc.KubernetesConfig.KubernetesVersion } - path, err := cluster.CacheKubectlBinary(version) + path, err := node.CacheKubectlBinary(version) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 758b60b7a3..17e3da8694 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -61,7 +61,7 @@ var nodeStartCmd = &cobra.Command{ } // Start it up baby - node.Start(*cc, *n, nil) + node.Start(*cc, *n, nil, false) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 6b063a88ba..9ab8a0e730 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -45,7 +45,6 @@ import ( "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" @@ -348,10 +347,7 @@ func runStart(cmd *cobra.Command, args []string) { } } - kubeconfig, err := cluster.InitialSetup(cc, n, existingAddons) - if err != nil { - exit.WithError("Starting node", err) - } + kubeconfig := node.Start(cc, n, existingAddons, true) numNodes := viper.GetInt(nodes) if numNodes > 1 { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 0863688ea8..db8d9c3718 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -212,6 +212,10 @@ func (k *Bootstrapper) 
StartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "setting up node") } + if err := k.applyNodeLabels(cfg); err != nil { + glog.Warningf("unable to apply node labels: %v", err) + } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { glog.Warningf("unable to adjust resource limits: %v", err) } @@ -231,10 +235,6 @@ func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { } } - if err := k.applyNodeLabels(cfg); err != nil { - glog.Warningf("unable to apply node labels: %v", err) - } - return nil } diff --git a/pkg/minikube/cluster/cache.go b/pkg/minikube/cluster/cache.go deleted file mode 100644 index 6fcf303f27..0000000000 --- a/pkg/minikube/cluster/cache.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import ( - "os" - "runtime" - - "github.com/golang/glog" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/drivers/kic" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/download" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/image" - "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" -) - -const ( - cacheImages = "cache-images" - cacheImageConfigKey = "cache" -) - -// BeginCacheKubernetesImages caches images required for kubernetes version in the background -func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { - if download.PreloadExists(k8sVersion, cRuntime) { - glog.Info("Caching tarball of preloaded images") - err := download.Preload(k8sVersion, cRuntime) - if err == nil { - glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime) - return // don't cache individual images if preload is successful. - } - glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) - } - - if !viper.GetBool(cacheImages) { - return - } - - g.Go(func() error { - return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) - }) -} - -// HandleDownloadOnly caches appropariate binaries and images -func HandleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { - // If --download-only, complete the remaining downloads and exit. 
- if !viper.GetBool("download-only") { - return - } - if err := doCacheBinaries(k8sVersion); err != nil { - exit.WithError("Failed to cache binaries", err) - } - if _, err := CacheKubectlBinary(k8sVersion); err != nil { - exit.WithError("Failed to cache kubectl", err) - } - WaitCacheRequiredImages(cacheGroup) - WaitDownloadKicArtifacts(kicGroup) - if err := saveImagesToTarFromConfig(); err != nil { - exit.WithError("Failed to cache images to tar", err) - } - out.T(out.Check, "Download complete!") - os.Exit(0) - -} - -// CacheKubectlBinary caches the kubectl binary -func CacheKubectlBinary(k8sVerison string) (string, error) { - binary := "kubectl" - if runtime.GOOS == "windows" { - binary = "kubectl.exe" - } - - return download.Binary(binary, k8sVerison, runtime.GOOS, runtime.GOARCH) -} - -// doCacheBinaries caches Kubernetes binaries in the foreground -func doCacheBinaries(k8sVersion string) error { - return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) -} - -// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available -func BeginDownloadKicArtifacts(g *errgroup.Group) { - glog.Info("Beginning downloading kic artifacts") - g.Go(func() error { - glog.Infof("Downloading %s to local daemon", kic.BaseImage) - return image.WriteImageToDaemon(kic.BaseImage) - }) -} - -// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. -func WaitDownloadKicArtifacts(g *errgroup.Group) { - if err := g.Wait(); err != nil { - glog.Errorln("Error downloading kic artifacts: ", err) - return - } - glog.Info("Successfully downloaded all kic artifacts") -} - -// WaitCacheRequiredImages blocks until the required images are all cached. 
-func WaitCacheRequiredImages(g *errgroup.Group) { - if !viper.GetBool(cacheImages) { - return - } - if err := g.Wait(); err != nil { - glog.Errorln("Error caching images: ", err) - } -} - -// saveImagesToTarFromConfig saves images to tar in cache which specified in config file. -// currently only used by download-only option -func saveImagesToTarFromConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return image.SaveToDir(images, constants.ImageCacheDir) -} - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. -func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} - -func imagesInConfigFile() ([]string, error) { - configFile, err := config.ReadConfig(localpath.ConfigFile()) - if err != nil { - return nil, err - } - if values, ok := configFile[cacheImageConfigKey]; ok { - var images []string - for key := range values.(map[string]interface{}) { - images = append(images, key) - } - return images, nil - } - return []string{}, nil -} diff --git a/pkg/minikube/cluster/setup.go b/pkg/minikube/cluster/setup.go deleted file mode 100644 index 513154ccce..0000000000 --- a/pkg/minikube/cluster/setup.go +++ /dev/null @@ -1,422 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "fmt" - "net" - "os" - "os/exec" - "strconv" - "strings" - "time" - - "github.com/blang/semver" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/pkg/errors" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" - "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/logs" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util" - "k8s.io/minikube/pkg/util/retry" -) - -const ( - waitTimeout = "wait-timeout" - waitUntilHealthy = "wait" - embedCerts = "embed-certs" - keepContext = "keep-context" - imageRepository = "image-repository" - containerRuntime = "container-runtime" -) - -// InitialSetup performs all necessary operations on the initial control plane node when first spinning up a cluster -func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) (*kubeconfig.Settings, error) { - var kicGroup errgroup.Group - if driver.IsKIC(cc.Driver) { - BeginDownloadKicArtifacts(&kicGroup) - } - - var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - - // Abstraction 
leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { - exit.WithError("Failed to save config", err) - } - - HandleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) - WaitDownloadKicArtifacts(&kicGroup) - - mRunner, preExists, machineAPI, host := StartMachine(&cc, &n) - defer machineAPI.Close() - - // wait for preloaded tarball to finish downloading before configuring runtimes - WaitCacheRequiredImages(&cacheGroup) - - sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) - if err != nil { - return nil, err - } - - // configure the runtime (docker, containerd, crio) - cr := ConfigureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) - - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &cc, &n, cc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } - - // setup kubeadm (must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, cc, n) - - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - err = bs.StartCluster(cc) - if err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) - } - //configureMounts() - - if err := CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") - } - - // enable addons, both old and new! - if existingAddons != nil { - addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) - } - - // special ops for none , like change minikube directory. 
- // multinode super doesn't work on the none driver - if cc.Driver == driver.None && len(cc.Nodes) == 1 { - prepareNone() - } - - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) - } - } - - return kubeconfig, nil - -} - -// ConfigureRuntimes does what needs to happen to get a runtime going. -func ConfigureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { - bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range config.ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := apiServerURL(*h, *cc, *n) - if err != nil { - exit.WithError("Failed to get API Server URL", err) - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) - } - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: 
localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - -func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { - hostname := "" - port := n.Port - var err error - if driver.IsKIC(h.DriverName) { - // for kic drivers we use 127.0.0.1 instead of node IP, - // because of Docker on MacOs limitations for reaching to container's IP. - hostname = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(h.DriverName, h.Name, port) - if err != nil { - return "", errors.Wrap(err, "host port binding") - } - } else { - hostname, err = h.Driver.GetIP() - if err != nil { - return "", errors.Wrap(err, "get ip") - } - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - hostname = cc.KubernetesConfig.APIServerName - } - return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil -} - -// StartMachine starts a VM -func StartMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - - // Save IP to config file for subsequent use - node.IP = ip - err = config.SaveNode(cfg, node) - if err != nil { - exit.WithError("saving node", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - host, exists, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --vm-driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. 
- if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - out.WarningT("Node may be unable to resolve external DNS records") - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) - } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} - -// prepareNone prepares the user and host for the joy of the "none" driver -func prepareNone() { - out.T(out.StartingNone, "Configuring local host environment ...") - if viper.GetBool(config.WantNoneDriverWarning) { - out.T(out.Empty, "") - out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") - out.WarningT("For more information, see:") - out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.T(out.Empty, "") - } - - if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { - home := os.Getenv("HOME") - out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) - out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - - out.T(out.Empty, "") - out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.T(out.Empty, "") - - out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") - } - - if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { - exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 7458ff80a3..55b2fdf298 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -39,7 +39,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error { return err } - Start(*cc, n, nil) + Start(*cc, n, nil, false) return nil } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0a5e3fc095..6f9b441366 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,94 +17,447 @@ limitations under the License. 
package node import ( + "fmt" + "net" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/blang/semver" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" +) + +const ( + waitTimeout = "wait-timeout" + waitUntilHealthy = "wait" + embedCerts = "embed-certs" + keepContext = "keep-context" + imageRepository = "image-repository" + containerRuntime = "container-runtime" ) // Start spins up a guest and starts the kubernetes node. -func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) { - // Now that the ISO is downloaded, pull images in the background while the VM boots. 
- var cacheGroup errgroup.Group - if !driver.BareMetal(cc.Driver) { - cluster.BeginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) - } - +func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings { var kicGroup errgroup.Group if driver.IsKIC(cc.Driver) { - cluster.BeginDownloadKicArtifacts(&kicGroup) + beginDownloadKicArtifacts(&kicGroup) } - runner, _, mAPI, _ := cluster.StartMachine(&cc, &n) - defer mAPI.Close() - - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) + var cacheGroup errgroup.Group + if !driver.BareMetal(cc.Driver) { + beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) } - k8sVersion := n.KubernetesVersion - driverName := cc.Driver - // exits here in case of --download-only option. - cluster.HandleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) - cluster.WaitDownloadKicArtifacts(&kicGroup) + // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. 
+ if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil { + exit.WithError("Failed to save config", err) + } + + handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + waitDownloadKicArtifacts(&kicGroup) + + mRunner, preExists, machineAPI, host := startMachine(&cc, &n) + defer machineAPI.Close() // wait for preloaded tarball to finish downloading before configuring runtimes - cluster.WaitCacheRequiredImages(&cacheGroup) + waitCacheRequiredImages(&cacheGroup) - sv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) + sv, err := util.ParseKubernetesVersion(n.KubernetesVersion) if err != nil { exit.WithError("Failed to parse kubernetes version", err) } // configure the runtime (docker, containerd, crio) - cr := cluster.ConfigureRuntimes(runner, driverName, cc.KubernetesConfig, sv) - showVersionInfo(k8sVersion, cr) + cr := configureRuntimes(mRunner, cc.Driver, cc.KubernetesConfig, sv) + showVersionInfo(n.KubernetesVersion, cr) + + var bs bootstrapper.Bootstrapper + var kubeconfig *kubeconfig.Settings + if apiServer { + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kubeconfig, err = setupKubeconfig(host, &cc, &n, cc.Name) + if err != nil { + exit.WithError("Failed to setup kubeconfig", err) + } + + // setup kubeadm (must come after setupKubeconfig) + bs = setupKubeAdm(machineAPI, cc, n) + err = bs.StartCluster(cc) + if err != nil { + exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) + } + } else { + bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + + if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { + exit.WithError("setting up certs", err) + } + + if err = bs.SetupNode(cc); err != nil { + exit.WithError("Failed to setup node", err) + } + } configureMounts() + if err := CacheAndLoadImagesInConfig(); err != 
nil { + out.T(out.FailureType, "Unable to load cached images from config file.") + } + // enable addons, both old and new! if existingAddons != nil { addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList) } - if err := bs.UpdateNode(cc, n, cr); err != nil { - exit.WithError("Failed to update node", err) + if apiServer { + // special ops for none , like change minikube directory. + // multinode super doesn't work on the none driver + if cc.Driver == driver.None && len(cc.Nodes) == 1 { + prepareNone() + } + + // Skip pre-existing, because we already waited for health + if viper.GetBool(waitUntilHealthy) && !preExists { + if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil { + exit.WithError("Wait failed", err) + } + } + } else { + if err := bs.UpdateNode(cc, n, cr); err != nil { + exit.WithError("Updating node", err) + } + + cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + exit.WithError("Getting primary control plane", err) + } + cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp) + if err != nil { + exit.WithError("Getting bootstrapper", err) + } + + joinCmd, err := cpBs.GenerateToken(cc) + if err != nil { + exit.WithError("generating join token", err) + } + + if err = bs.JoinCluster(cc, n, joinCmd); err != nil { + exit.WithError("joining cluster", err) + } } - if err := cluster.CacheAndLoadImagesInConfig(); err != nil { - exit.WithError("Unable to load cached images from config file.", err) - } + return kubeconfig - if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil { - exit.WithError("setting up certs", err) - } +} - if err = bs.SetupNode(cc); err != nil { - exit.WithError("Failed to setup node", err) +// ConfigureRuntimes does what needs to happen to get a runtime going. 
+func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: viper.GetString(containerRuntime), + Runner: runner, ImageRepository: k8s.ImageRepository, + KubernetesVersion: kv, } - - cp, err := config.PrimaryControlPlane(&cc) + cr, err := cruntime.New(co) if err != nil { - exit.WithError("Getting primary control plane", err) - } - cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp) - if err != nil { - exit.WithError("Getting bootstrapper", err) + exit.WithError("Failed runtime", err) } - joinCmd, err := cpBs.GenerateToken(cc) - if err != nil { - exit.WithError("generating join token", err) + disableOthers := true + if driver.BareMetal(drvName) { + disableOthers = false } - if err = bs.JoinCluster(cc, n, joinCmd); err != nil { - exit.WithError("joining cluster", err) + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(drvName) { + if err := cr.Preload(k8s); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + +// setupKubeAdm adds any requested files into the VM before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range config.ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, download binaries + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + return bs +} + +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { + addr, err := apiServerURL(*h, *cc, *n) + if err != nil { + exit.WithError("Failed to get API Server URL", err) + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.MakeMiniPath("client.crt"), + ClientKey: localpath.MakeMiniPath("client.key"), + CertificateAuthority: 
localpath.MakeMiniPath("ca.crt"), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), + } + + kcs.SetPath(kubeconfig.PathFromEnv()) + if err := kubeconfig.Update(kcs); err != nil { + return kcs, err + } + return kcs, nil +} + +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname := "" + port := n.Port + var err error + if driver.IsKIC(h.DriverName) { + // for kic drivers we use 127.0.0.1 instead of node IP, + // because of Docker on MacOs limitations for reaching to container's IP. + hostname = oci.DefaultBindIPV4 + port, err = oci.ForwardedPort(h.DriverName, h.Name, port) + if err != nil { + return "", errors.Wrap(err, "host port binding") + } + } else { + hostname, err = h.Driver.GetIP() + if err != nil { + return "", errors.Wrap(err, "get ip") + } + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + +// StartMachine starts a VM +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { + m, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Failed to get machine client", err) + } + host, preExists = startHost(m, *cfg, *node) + runner, err = machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + ip := validateNetwork(host, runner) + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + if err != nil { + out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + // Save IP to config file for subsequent use + node.IP = ip + err = config.SaveNode(cfg, node) + if err != nil { + exit.WithError("saving node", err) + } + + return runner, preExists, m, host +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { + host, exists, err := machine.StartHost(api, mc, n) + if err != nil { + exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err) + } + return host, exists +} + +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner) string { + ip, err := h.Driver.GetIP() + if err != nil { + exit.WithError("Unable to get VM IP address", err) + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + trySSH(h, ip) + } + + tryLookup(r) + tryRegistry(r) + return ip +} + +func trySSH(h *host.Host, ip string) { + if viper.GetBool("force") { + return + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { + exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } +} + +func tryLookup(r command.Runner) { + // DNS check + if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil { + glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) + // will try with without query type for ISOs with different busybox versions. 
+ if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { + glog.Warningf("nslookup failed: %v", err) + out.WarningT("Node may be unable to resolve external DNS records") + } + } +} +func tryRegistry(r command.Runner) { + // Try an HTTPS connection to the image repository + proxy := os.Getenv("HTTPS_PROXY") + opts := []string{"-sS"} + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) + } + + repo := viper.GetString(imageRepository) + if repo == "" { + repo = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", repo)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) + } +} + +// prepareNone prepares the user and host for the joy of the "none" driver +func prepareNone() { + out.T(out.StartingNone, "Configuring local host environment ...") + if viper.GetBool(config.WantNoneDriverWarning) { + out.T(out.Empty, "") + out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") + out.WarningT("For more information, see:") + out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.T(out.Empty, "") + } + + if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { + home := os.Getenv("HOME") + out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) + out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") + + out.T(out.Empty, "") + out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.T(out.Empty, "") + + out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + } + + if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { + exit.WithCodeT(exit.Permissions, "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}", out.V{"minikube_dir_path": localpath.MiniPath(), "error": err}) } } From add1c8f953fd6e45f8d937a54c1be198b565b92b Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 15:10:34 -0700 Subject: [PATCH 36/42] missing file --- pkg/minikube/node/cache.go | 168 +++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 pkg/minikube/node/cache.go diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go new file mode 100644 index 0000000000..f1b3ac8f32 --- /dev/null +++ b/pkg/minikube/node/cache.go @@ -0,0 +1,168 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package node + +import ( + "os" + "runtime" + + "github.com/golang/glog" + "github.com/spf13/viper" + "golang.org/x/sync/errgroup" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/drivers/kic" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/image" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" +) + +const ( + cacheImages = "cache-images" + cacheImageConfigKey = "cache" +) + +// beginCacheKubernetesImages caches images required for kubernetes version in the background +func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { + if download.PreloadExists(k8sVersion, cRuntime) { + glog.Info("Caching tarball of preloaded images") + err := download.Preload(k8sVersion, cRuntime) + if err == nil { + glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime) + return // don't cache individual images if preload is successful. + } + glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) + } + + if !viper.GetBool(cacheImages) { + return + } + + g.Go(func() error { + return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) + }) +} + +// handleDownloadOnly caches appropriate binaries and images +func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { + // If --download-only, complete the remaining downloads and exit.
+ if !viper.GetBool("download-only") { + return + } + if err := doCacheBinaries(k8sVersion); err != nil { + exit.WithError("Failed to cache binaries", err) + } + if _, err := CacheKubectlBinary(k8sVersion); err != nil { + exit.WithError("Failed to cache kubectl", err) + } + waitCacheRequiredImages(cacheGroup) + waitDownloadKicArtifacts(kicGroup) + if err := saveImagesToTarFromConfig(); err != nil { + exit.WithError("Failed to cache images to tar", err) + } + out.T(out.Check, "Download complete!") + os.Exit(0) + +} + +// CacheKubectlBinary caches the kubectl binary +func CacheKubectlBinary(k8sVerison string) (string, error) { + binary := "kubectl" + if runtime.GOOS == "windows" { + binary = "kubectl.exe" + } + + return download.Binary(binary, k8sVerison, runtime.GOOS, runtime.GOARCH) +} + +// doCacheBinaries caches Kubernetes binaries in the foreground +func doCacheBinaries(k8sVersion string) error { + return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) +} + +// beginDownloadKicArtifacts downloads the kic image + preload tarball in the background +func beginDownloadKicArtifacts(g *errgroup.Group) { + glog.Info("Beginning downloading kic artifacts") + g.Go(func() error { + glog.Infof("Downloading %s to local daemon", kic.BaseImage) + return image.WriteImageToDaemon(kic.BaseImage) + }) +} + +// waitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. +func waitDownloadKicArtifacts(g *errgroup.Group) { + if err := g.Wait(); err != nil { + glog.Errorln("Error downloading kic artifacts: ", err) + return + } + glog.Info("Successfully downloaded all kic artifacts") +} + +// waitCacheRequiredImages blocks until the required images are all cached.
+func waitCacheRequiredImages(g *errgroup.Group) { + if !viper.GetBool(cacheImages) { + return + } + if err := g.Wait(); err != nil { + glog.Errorln("Error caching images: ", err) + } +} + +// saveImagesToTarFromConfig saves images to tar in cache which specified in config file. +// currently only used by download-only option +func saveImagesToTarFromConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return image.SaveToDir(images, constants.ImageCacheDir) +} + +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + +func imagesInConfigFile() ([]string, error) { + configFile, err := config.ReadConfig(localpath.ConfigFile()) + if err != nil { + return nil, err + } + if values, ok := configFile[cacheImageConfigKey]; ok { + var images []string + for key := range values.(map[string]interface{}) { + images = append(images, key) + } + return images, nil + } + return []string{}, nil +} From efac79eb26e060f1daf68c4002fcb08ddd41e457 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 17:00:29 -0700 Subject: [PATCH 37/42] account for hyphens in profile name --- pkg/minikube/driver/driver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 170b99e71a..b7fd18b0c2 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -240,7 +240,8 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) string { if strings.Contains(name, "-") { - return strings.Split(name, "-")[0] 
+ a := strings.Split(name, "-") + return strings.Join(a[0:len(a)-2], "-") } return name } From fa97a5bf0d0f7a0b4a67512b490a8caf418d9fdd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 18:59:15 -0700 Subject: [PATCH 38/42] fix machine name creation --- pkg/minikube/driver/driver.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b7fd18b0c2..ebee96ffca 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -234,14 +234,13 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { if len(cc.Nodes) == 1 || n.ControlPlane { return cc.Name } - return fmt.Sprintf("%s-%s", cc.Name, n.Name) + return fmt.Sprintf("%s---%s", cc.Name, n.Name) } // ClusterNameFromMachine retrieves the cluster name embedded in the machine name func ClusterNameFromMachine(name string) string { - if strings.Contains(name, "-") { - a := strings.Split(name, "-") - return strings.Join(a[0:len(a)-2], "-") + if strings.Contains(name, "---") { + return strings.Split(name, "---")[0] } return name } From f9b38dc04ea6b339b1b783a6a801b773c010b505 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 18 Mar 2020 21:28:03 -0700 Subject: [PATCH 39/42] clean up status output and have multinode survive cluster restarts --- cmd/minikube/cmd/start.go | 3 +++ cmd/minikube/cmd/status.go | 21 ++++++++++++--------- cmd/minikube/cmd/status_test.go | 6 +++--- pkg/minikube/driver/driver.go | 6 +++--- pkg/provision/provision.go | 2 +- 5 files changed, 22 insertions(+), 16 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 9ab8a0e730..44e01cf7a9 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -350,6 +350,9 @@ func runStart(cmd *cobra.Command, args []string) { kubeconfig := node.Start(cc, n, existingAddons, true) numNodes := viper.GetInt(nodes) + if numNodes == 1 && existing != nil { + numNodes = 
len(existing.Nodes) + } if numNodes > 1 { if driver.BareMetal(driverName) { out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index a153a7b932..44b96bc8e2 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -67,6 +67,7 @@ type Status struct { Kubelet string APIServer string Kubeconfig string + Worker bool } const ( @@ -78,6 +79,12 @@ host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} + +` + workerStatusFormat = `{{.Name}} +host: {{.Host}} +kubelet: {{.Kubelet}} + ` ) @@ -153,15 +160,7 @@ func exitCode(st *Status) int { func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { - var profile, node string - - if strings.Contains(name, "-") { - profile = strings.Split(name, "-")[0] - node = strings.Split(name, "-")[1] - } else { - profile = name - node = name - } + profile, node := driver.ClusterNameFromMachine(name) st := &Status{ Name: node, @@ -169,6 +168,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) APIServer: Nonexistent, Kubelet: Nonexistent, Kubeconfig: Nonexistent, + Worker: !controlPlane, } hs, err := machine.Status(api, name) @@ -265,6 +265,9 @@ For the list accessible variables for the template, see the struct values here: func statusText(st *Status, w io.Writer) error { tmpl, err := template.New("status").Parse(statusFormat) + if st.Worker && statusFormat == defaultStatusFormat { + tmpl, err = template.New("worker-status").Parse(workerStatusFormat) + } if err != nil { return err } diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index 44f4133dfd..b11e549a6d 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -52,17 +52,17 @@ func TestStatusText(t *testing.T) { { name: "ok", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", 
Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", }, { name: "paused", state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", }, { name: "down", state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index ebee96ffca..b6106474d8 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -238,9 +238,9 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { } // ClusterNameFromMachine retrieves the cluster name embedded in the machine name -func ClusterNameFromMachine(name string) string { +func ClusterNameFromMachine(name string) (string, string) { if strings.Contains(name, "---") { - return strings.Split(name, "---")[0] + return strings.Split(name, "---")[0], strings.Split(name, "---")[1] } - return name + return name, name } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index fd84405266..acad46c3ac 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ 
-196,7 +196,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { } func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster := driver.ClusterNameFromMachine(name) + cluster, _ := driver.ClusterNameFromMachine(name) c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") From 35aec77fe4a595c169a67f78aff76849029c55ec Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 14:07:43 -0700 Subject: [PATCH 40/42] code comments --- cmd/minikube/cmd/node_add.go | 4 +--- cmd/minikube/cmd/ssh.go | 3 +-- cmd/minikube/cmd/start.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 5 ----- pkg/minikube/node/node.go | 6 ++++++ 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 1e28103142..d593639b4d 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,8 +17,6 @@ limitations under the License. 
package cmd import ( - "fmt" - "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -48,7 +46,7 @@ var nodeAddCmd = &cobra.Command{ out.ErrT(out.FailureType, "none driver does not support multi-node clusters") } - name := fmt.Sprintf("m%02d", len(cc.Nodes)+1) + name := node.Name(len(cc.Nodes) + 1) out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 917733ac81..4a8508ec5d 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -60,8 +60,7 @@ var sshCmd = &cobra.Command{ } else { n, _, err = node.Retrieve(cc, nodeName) if err != nil { - out.FailureT("Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) - exit.WithError("", err) + exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) } } host, err := machine.LoadHost(api, driver.MachineName(*cc, *n)) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 3a6771976a..75ff638511 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -356,10 +356,10 @@ func runStart(cmd *cobra.Command, args []string) { } if numNodes > 1 { if driver.BareMetal(driverName) { - out.T(out.Meh, "The none driver is not compatible with multi-node clusters.") + exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") } else { for i := 1; i < numNodes; i++ { - nodeName := fmt.Sprintf("m%02d", i+1) + nodeName := node.Name(i + 1) n := config.Node{ Name: nodeName, Worker: true, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 37516d0f09..d00f166a54 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -419,13 +419,8 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { return "", errors.Wrap(err, "generating 
bootstrap token") } - /*cp, err := config.PrimaryControlPlane(&cc) - if err != nil { - return "", errors.Wrap(err, "getting primary control plane") - }*/ joinCmd := r.Stdout.String() joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) - //joinCmd = strings.ReplaceAll(joinCmd, "localhost", cp.IP) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) return joinCmd, nil diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 55b2fdf298..97e9d2f204 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -18,6 +18,7 @@ package node import ( "errors" + "fmt" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" @@ -91,3 +92,8 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { } return config.SaveProfile(viper.GetString(config.ProfileName), cfg) } + +// Name returns the appropriate name for the node given the current number of nodes +func Name(index int) string { + return fmt.Sprintf("m%02d", index) +} From a09aa6253588b1a6c75033aef55a43214090923f Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 14:34:49 -0700 Subject: [PATCH 41/42] delete admin.conf before running kubedm init --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index d00f166a54..7a3ade16c2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -165,6 +165,13 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + // Remove admin.conf from any previous run + c := exec.Command("/bin/bash", "-c", "sudo rm -f /etc/kubernetes/admin.conf") + _, err = k.c.RunCmd(c) + if err != nil { + return errors.Wrap(err, "deleting admin.conf") + } + version, 
err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -202,7 +209,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) + c = exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) rr, err := k.c.RunCmd(c) if err != nil { return errors.Wrapf(err, "init failed. output: %q", rr.Output()) From 05814cce2833477531397d57a0a672f9c8e7f8e5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 20 Mar 2020 15:10:45 -0700 Subject: [PATCH 42/42] only apply kic networking overlay to control plane --- pkg/minikube/bootstrapper/bootstrapper.go | 1 - pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 17 ++++------------- pkg/minikube/node/start.go | 3 --- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 1dac315e80..5627e1b3c3 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -42,7 +42,6 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - SetupNode(config.ClusterConfig) error GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(LogOptions) map[string]string diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 7a3ade16c2..1c0268d779 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -215,8 +215,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return errors.Wrapf(err, "init failed. output: %q", rr.Output()) } - if err = k.SetupNode(cfg); err != nil { - return errors.Wrap(err, "setting up node") + if cfg.Driver == driver.Docker { + if err := k.applyKicOverlay(cfg); err != nil { + return errors.Wrap(err, "apply kic overlay") + } } if err := k.applyNodeLabels(cfg); err != nil { @@ -234,17 +236,6 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { return nil } -// SetupNode runs commands that need to be on all nodes -func (k *Bootstrapper) SetupNode(cfg config.ClusterConfig) error { - if cfg.Driver == driver.Docker { - if err := k.applyKicOverlay(cfg); err != nil { - return errors.Wrap(err, "apply kic overlay") - } - } - - return nil -} - // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 6f9b441366..b46469837e 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -124,9 +124,6 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo exit.WithError("setting up certs", err) } - if err = bs.SetupNode(cc); err != nil { - exit.WithError("Failed to setup node", err) - } } configureMounts()