diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index c72ff85e33..9758c19a4c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -947,8 +947,9 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru if n.ControlPlane { // for primary control-plane node only, generate kubeadm config based on current params // on node restart, it will be checked against later if anything needs changing + var kubeadmCfg []byte if config.IsPrimaryControlPlane(cfg, n) { - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) + kubeadmCfg, err = bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } @@ -964,7 +965,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru return errors.Wrapf(err, "parsing kubernetes version %q", cfg.KubernetesConfig.KubernetesVersion) } workaround := kv.GTE(semver.Version{Major: 1, Minor: 29}) && config.IsPrimaryControlPlane(cfg, n) && len(config.ControlPlanes(cfg)) == 1 - kubevipCfg, err := kubevip.Configure(cfg, workaround) + kubevipCfg, err := kubevip.Configure(cfg, k.c, kubeadmCfg, workaround) if err != nil { klog.Errorf("couldn't generate kube-vip config, this might cause issues (will continue): %v", err) } else { diff --git a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go index cd34ba63e1..4a8e3fe882 100644 --- a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go +++ b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go @@ -19,10 +19,14 @@ package kubevip import ( "bytes" "html/template" + "os/exec" + "strings" "github.com/pkg/errors" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" ) const Manifest = "kube-vip.yaml" @@ -72,10 +76,12 @@ spec: value: {{ .VIP }} - name: prometheus_server value: :2112 + {{- if 
.EnableLB }} - name : lb_enable value: "true" - name: lb_port value: "{{ .Port }}" + {{- end}} image: ghcr.io/kube-vip/kube-vip:v0.7.1 imagePullPolicy: IfNotPresent name: kube-vip @@ -101,17 +107,19 @@ status: {} `)) // Configure takes last client ip address in cluster nodes network subnet as vip address and generates kube-vip.yaml file. -func Configure(cc config.ClusterConfig, workaround bool) ([]byte, error) { +func Configure(cc config.ClusterConfig, r command.Runner, kubeadmCfg []byte, workaround bool) ([]byte, error) { klog.Info("generating kube-vip config ...") params := struct { VIP string Port int AdminConf string + EnableLB bool }{ VIP: cc.KubernetesConfig.APIServerHAVIP, Port: cc.APIServerPort, AdminConf: "/etc/kubernetes/admin.conf", + EnableLB: enableCPLB(cc, r, kubeadmCfg), } if workaround { params.AdminConf = "/etc/kubernetes/super-admin.conf" @@ -126,3 +134,25 @@ func Configure(cc config.ClusterConfig, workaround bool) ([]byte, error) { return b.Bytes(), nil } + +// enableCPLB auto-enables control-plane load-balancing, if possible - currently only possible with ipvs. 
+// ref: https://kube-vip.io/docs/about/architecture/?query=ipvs#control-plane-load-balancing +func enableCPLB(cc config.ClusterConfig, r command.Runner, kubeadmCfg []byte) bool { + // note known issue: "service lb with ipvs mode won't work with kubeproxy that is configured with ipvs mode" + // ref: https://kube-vip.io/docs/about/architecture/?query=ipvs#known-issues + // so we only want to enable control-plane load-balancing if kube-proxy mode is not set to ipvs + // ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#proxy-mode-ipvs + if ipvs := strings.Contains(string(kubeadmCfg), "mode: ipvs"); ipvs { + klog.Info("giving up enabling control-plane load-balancing as kube-proxy mode appears to be set to ipvs") + return false + } + klog.Info("enabling control-plane load-balancing as kube-proxy mode appears not to be set to ipvs") + // for vm driver, also ensure required ipvs kernel modules are loaded to enable kube-vip's control-plane load-balancing feature + // ref: https://github.com/kubernetes/kubernetes/blob/f90461c43e881d320b78d48793db10c110d488d1/pkg/proxy/ipvs/README.md?plain=1#L257-L269 + if driver.IsVM(cc.Driver) { + if _, err := r.RunCmd(exec.Command("sudo", "sh", "-c", "modprobe --all ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack")); err != nil { + klog.Errorf("unable to load ipvs kernel modules: %v", err) + } + } + return true +} diff --git a/test/integration/ha_test.go b/test/integration/ha_test.go index cdae834d97..258c4be88f 100644 --- a/test/integration/ha_test.go +++ b/test/integration/ha_test.go @@ -35,8 +35,8 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -// TestMutliControlPlane tests all ha (multi-control plane) cluster functionality -func TestMutliControlPlane(t *testing.T) { +// TestMultiControlPlane tests all ha (multi-control plane) cluster functionality +func TestMultiControlPlane(t *testing.T) { if NoneDriver() { t.Skip("none driver does not support multinode/ha(multi-control plane) cluster") }