Merge pull request #7930 from sharifelgamal/cni

Automatically apply CNI on multinode clusters
Thomas Strömberg 2020-04-29 09:14:38 -07:00 committed by GitHub
commit 7b39b102d6
9 changed files with 71 additions and 26 deletions

View File

@@ -67,6 +67,12 @@ var nodeAddCmd = &cobra.Command{
}
}
+// Add CNI config if it's not already there
+// We need to run kubeadm.init here as well
+if err := config.MultiNodeCNIConfig(cc); err != nil {
+exit.WithError("failed to save config", err)
+}
out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
},
}
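To make the flow above concrete, here is a minimal runnable sketch of the guard-then-save pattern, using stand-in types rather than minikube's real config package:

```go
package main

import "fmt"

// ClusterConfig is a stand-in for illustration only; minikube's real
// ClusterConfig and MultiNodeCNIConfig live in pkg/minikube/config
// (see the diff further down).
type ClusterConfig struct {
	Name          string
	NetworkPlugin string
	ExtraOptions  map[string]string
}

// multiNodeCNIConfig mirrors the behavior added here: default the network
// plugin to CNI and set a pod CIDR, but only when the user has not already
// supplied one via --extra-config.
func multiNodeCNIConfig(cc *ClusterConfig) error {
	if cc.ExtraOptions["kubeadm.pod-network-cidr"] == "" {
		cc.NetworkPlugin = "cni"
		cc.ExtraOptions["kubeadm.pod-network-cidr"] = "10.244.0.0/16"
	}
	return nil // the real function also persists the profile to disk
}

func main() {
	cc := &ClusterConfig{Name: "multinode-demo", ExtraOptions: map[string]string{}}
	if err := multiNodeCNIConfig(cc); err != nil {
		fmt.Println("failed to save config:", err)
		return
	}
	fmt.Printf("%s: plugin=%s cidr=%s\n", cc.Name, cc.NetworkPlugin, cc.ExtraOptions["kubeadm.pod-network-cidr"])
}
```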

View File

@@ -23,12 +23,6 @@ import (
)
const (
-// DefaultNetwork is the Docker default bridge network named "bridge"
-// (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
-DefaultNetwork = "bridge"
-// DefaultPodCIDR is the CIDR to be used for pods inside the node.
-DefaultPodCIDR = "10.244.0.0/16"
// Version is the current version of kic
Version = "v0.0.10"
// SHA of the kic base image

View File

@@ -226,7 +226,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err)
}
// the overlay is required for the containerd and cri-o runtimes: see #7428
-if driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker" {
+if config.MultiNode(cfg) || (driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker") {
if err := k.applyKICOverlay(cfg); err != nil {
glog.Errorf("failed to apply kic overlay: %v", err)
}

View File

@@ -28,7 +28,9 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
)
// This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI
@@ -44,7 +46,6 @@ func init() {
}
// Bootstrapper returns a new bootstrapper for the cluster
-// TODO(#6891): Remove node as an argument
func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.ClusterConfig, r command.Runner) (bootstrapper.Bootstrapper, error) {
var b bootstrapper.Bootstrapper
var err error
@@ -59,3 +60,21 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.ClusterConfig, r command.Runner) (bootstrapper.Bootstrapper, error) {
}
return b, nil
}
+// ControlPlaneBootstrapper returns the bootstrapper for the cluster's control plane
+func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
+cp, err := config.PrimaryControlPlane(cc)
+if err != nil {
+return nil, errors.Wrap(err, "getting primary control plane")
+}
+h, err := machine.LoadHost(mAPI, driver.MachineName(*cc, cp))
+if err != nil {
+return nil, errors.Wrap(err, "getting control plane host")
+}
+cpr, err := machine.CommandRunner(h)
+if err != nil {
+return nil, errors.Wrap(err, "getting control plane command runner")
+}
+return Bootstrapper(mAPI, bootstrapperName, *cc, cpr)
+}
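The new helper bundles three lookups that were previously inlined at the call site (see the start.go hunk near the end of this diff). A rough sketch of the first lookup, with a stand-in Node type rather than config.Node:

```go
package main

import (
	"errors"
	"fmt"
)

// Node is a stand-in for config.Node; minikube's PrimaryControlPlane does
// roughly this scan over the profile's node list before the host and
// command runner are resolved for it.
type Node struct {
	Name         string
	ControlPlane bool
}

func primaryControlPlane(nodes []Node) (Node, error) {
	for _, n := range nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, errors.New("no control plane found")
}

func main() {
	nodes := []Node{{Name: "m02"}, {Name: "minikube", ControlPlane: true}}
	cp, err := primaryControlPlane(nodes)
	if err != nil {
		fmt.Println("getting primary control plane:", err)
		return
	}
	fmt.Println("control plane:", cp.Name)
}
```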

View File

@@ -24,6 +24,7 @@ import (
"os"
"github.com/pkg/errors"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/localpath"
)
@@ -200,3 +201,28 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig) error {
}
return ioutil.WriteFile(path, contents, 0644)
}
+// MultiNodeCNIConfig adds the default CNI config needed for multinode clusters and saves the config
+func MultiNodeCNIConfig(cc *ClusterConfig) error {
+if cc.KubernetesConfig.ExtraOptions.Get("pod-network-cidr", "kubeadm") == "" {
+cc.KubernetesConfig.NetworkPlugin = "cni"
+if err := cc.KubernetesConfig.ExtraOptions.Set(fmt.Sprintf("kubeadm.pod-network-cidr=%s", DefaultPodCIDR)); err != nil {
+return err
+}
+return SaveProfile(cc.Name, cc)
+}
+return nil
+}
+// MultiNode returns true if the cluster has multiple nodes or if the request is asking for multinode
+func MultiNode(cc ClusterConfig) bool {
+if len(cc.Nodes) > 1 {
+return true
+}
+if viper.GetInt("nodes") > 1 {
+return true
+}
+return false
+}
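A small runnable sketch of MultiNode's two triggers, with the viper.GetInt("nodes") lookup replaced by a plain parameter for clarity:

```go
package main

import "fmt"

// multiNode restates the logic above: a profile is treated as multinode if
// it already has several nodes saved, or if the current invocation asked
// for several (e.g. minikube start --nodes=3).
func multiNode(savedNodes, requestedNodes int) bool {
	return savedNodes > 1 || requestedNodes > 1
}

func main() {
	for _, c := range []struct{ saved, requested int }{
		{1, 1}, // plain single-node profile
		{2, 1}, // existing multinode profile
		{1, 3}, // fresh start with --nodes=3
	} {
		fmt.Printf("saved=%d requested=%d -> %v\n", c.saved, c.requested, multiNode(c.saved, c.requested))
	}
}
```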

View File

@@ -136,6 +136,13 @@ func SaveNode(cfg *ClusterConfig, node *Node) error {
if !update {
cfg.Nodes = append(cfg.Nodes, *node)
}
+if MultiNode(*cfg) {
+if err := MultiNodeCNIConfig(cfg); err != nil {
+return err
+}
+}
return SaveProfile(viper.GetString(ProfileName), cfg)
}

View File

@@ -22,6 +22,14 @@ import (
"github.com/blang/semver"
)
+const (
+// DefaultNetwork is the Docker default bridge network named "bridge"
+// (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
+DefaultNetwork = "bridge"
+// DefaultPodCIDR is the CIDR to be used for pods inside the node.
+DefaultPodCIDR = "10.244.0.0/16"
+)
// Profile represents a minikube profile
type Profile struct {
Name string
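A quick sanity check of the relocated constant; note that 10.244.0.0/16 matches the pod CIDR that common CNI manifests such as flannel's kube-flannel.yml assume by default:

```go
package main

import (
	"fmt"
	"net"
)

// Parse the default pod CIDR that now lives in pkg/minikube/config and
// report how many addresses it spans.
func main() {
	_, ipnet, err := net.ParseCIDR("10.244.0.0/16")
	if err != nil {
		panic(err)
	}
	ones, bits := ipnet.Mask.Size()
	fmt.Printf("network=%s addresses=2^%d\n", ipnet, bits-ones)
	// network=10.244.0.0/16 addresses=2^16
}
```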

View File

@@ -24,7 +24,6 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/registry"
)
@@ -163,9 +162,8 @@ func FlagDefaults(name string) FlagHints {
fh := FlagHints{}
if name != None {
fh.CacheImages = true
-// only for kic, till other run-times are available we auto-set containerd.
if name == Docker {
fh.ExtraOptions = append(fh.ExtraOptions, fmt.Sprintf("kubeadm.pod-network-cidr=%s", kic.DefaultPodCIDR))
fh.ExtraOptions = append(fh.ExtraOptions, fmt.Sprintf("kubeadm.pod-network-cidr=%s", config.DefaultPodCIDR))
}
return fh
}
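For reference, a sketch of what FlagDefaults yields for the Docker driver after this change; only the constant's home moved, the value is unchanged (defaultPodCIDR stands in for config.DefaultPodCIDR):

```go
package main

import "fmt"

// defaultPodCIDR mirrors the value of config.DefaultPodCIDR shown above.
const defaultPodCIDR = "10.244.0.0/16"

// flagDefaults is a simplified stand-in for driver.FlagDefaults, returning
// only the extra kubeadm options for the given driver name.
func flagDefaults(name string) []string {
	var extraOptions []string
	if name == "docker" {
		extraOptions = append(extraOptions, fmt.Sprintf("kubeadm.pod-network-cidr=%s", defaultPodCIDR))
	}
	return extraOptions
}

func main() {
	fmt.Println(flagDefaults("docker")) // [kubeadm.pod-network-cidr=10.244.0.0/16]
}
```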

View File

@@ -164,20 +164,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
}
// Make sure to use the command runner for the control plane to generate the join token
-cp, err := config.PrimaryControlPlane(starter.Cfg)
-if err != nil {
-return nil, errors.Wrap(err, "getting primary control plane")
-}
-h, err := machine.LoadHost(starter.MachineAPI, driver.MachineName(*starter.Cfg, cp))
-if err != nil {
-return nil, errors.Wrap(err, "getting control plane host")
-}
-cpr, err := machine.CommandRunner(h)
-if err != nil {
-return nil, errors.Wrap(err, "getting control plane command runner")
-}
-cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cpr)
+cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
if err != nil {
return nil, errors.Wrap(err, "getting control plane bootstrapper")
}
@@ -217,7 +204,7 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool) (comman
}
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
-// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
+// Hence, SaveProfile must be called before startHost, and again afterwards when we know the IP.
if err := config.SaveProfile(viper.GetString(config.ProfileName), cc); err != nil {
return nil, false, nil, nil, errors.Wrap(err, "Failed to save config")
}