add "(multi-control plane)" after each reference to "ha"
parent cc4dc8c4f8
commit 54c6e698a7
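Note: this commit only rewords user-facing messages and comments; flag behaviour is unchanged. For orientation, the sketch below mirrors how the integration tests in this commit drive the affected flags ("--ha" on start, "--control-plane" on node add). It is an illustrative sketch only: the "minikube" binary on PATH and the "ha-demo" profile name are assumptions, not part of the commit.

package main

import (
	"log"
	"os/exec"
)

func main() {
	// create an HA (multi-control plane) cluster; per the --ha flag help and
	// validateHANodeCount below, this defaults to 3 control-plane nodes
	start := exec.Command("minikube", "start", "-p", "ha-demo", "--ha", "--wait=true")
	if out, err := start.CombinedOutput(); err != nil {
		log.Fatalf("start failed: %v\n%s", err, out)
	}

	// adding another control-plane node is only supported for an existing
	// HA (multi-control plane) cluster (see node_add.go change below)
	add := exec.Command("minikube", "node", "add", "-p", "ha-demo", "--control-plane")
	if out, err := add.CombinedOutput(); err != nil {
		log.Fatalf("node add failed: %v\n%s", err, out)
	}
}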
@@ -51,7 +51,7 @@ var nodeAddCmd = &cobra.Command{
 		}
 
 		if cpNode && !config.IsHA(*cc) {
-			out.FailureT("Adding a control-plane node to a non-HA cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.")
+			out.FailureT("Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.")
 		}
 
 		roles := []string{}
@@ -106,7 +106,7 @@ var nodeAddCmd = &cobra.Command{
 }
 
 func init() {
-	nodeAddCmd.Flags().BoolVar(&cpNode, "control-plane", false, "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA clusters.")
+	nodeAddCmd.Flags().BoolVar(&cpNode, "control-plane", false, "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.")
 	nodeAddCmd.Flags().BoolVar(&workerNode, "worker", true, "If set, added node will be available as worker. Defaults to true.")
 	nodeAddCmd.Flags().BoolVar(&deleteNodeOnFailure, "delete-on-failure", false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
@@ -190,7 +190,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
 	startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
 	startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
-	startCmd.Flags().Bool(ha, false, "Create Highly Available Cluster with a minimum of three control-plane nodes that will also be marked for work.")
+	startCmd.Flags().Bool(ha, false, "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.")
 	startCmd.Flags().IntP(nodes, "n", 1, "The total number of nodes to spin up. Defaults to 1.")
 	startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
 	startCmd.Flags().Bool(noKubernetes, false, "If set, minikube VM/container will start without starting or configuring Kubernetes. (only works on new clusters)")
@@ -666,20 +666,20 @@ func addFeatureGate(featureGates, s string) string {
 	return strings.Join(split, ",")
 }
 
-// validateHANodeCount ensures correct total number of nodes in HA cluster.
+// validateHANodeCount ensures correct total number of nodes in ha (multi-control plane) cluster.
 func validateHANodeCount(cmd *cobra.Command) {
 	if !viper.GetBool(ha) {
 		return
 	}
 
-	// set total number of nodes in ha cluster to 3, if not otherwise defined by user
+	// set total number of nodes in ha (multi-control plane) cluster to 3, if not otherwise defined by user
 	if !cmd.Flags().Changed(nodes) {
 		viper.Set(nodes, 3)
 	}
 
 	// respect user preference, if correct
 	if cmd.Flags().Changed(nodes) && viper.GetInt(nodes) < 3 {
-		exit.Message(reason.Usage, "HA clusters require 3 or more control-plane nodes")
+		exit.Message(reason.Usage, "HA (multi-control plane) clusters require 3 or more control-plane nodes")
 	}
 }
@@ -744,11 +744,11 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
 	}
 
 	if cmd.Flags().Changed(ha) {
-		out.WarningT("Changing the HA mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.")
+		out.WarningT("Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.")
 	}
 
 	if cmd.Flags().Changed(apiServerPort) && config.IsHA(*existing) {
-		out.WarningT("Changing the apiserver port of an existing minikube ha cluster is not currently supported. Please first delete the cluster.")
+		out.WarningT("Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.")
 	} else {
 		updateIntFromFlag(cmd, &cc.APIServerPort, apiServerPort)
 	}
@@ -623,8 +623,8 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
 		// DANGER: This log message is hard-coded in an integration test!
 		klog.Infof("The running cluster does not require reconfiguration: %s", host)
 		// taking a shortcut, as the cluster seems to be properly configured
-		// except for vm driver in non-ha cluster - fallback to old behaviour
-		// here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha cluster with vm driver
+		// except for vm driver in non-ha (non-multi-control plane) cluster - fallback to old behaviour
+		// here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha (non-multi-control plane) cluster with vm driver
 		// where such cluster needs to be reconfigured b/c of (currently) ephemeral config, but then also,
 		// starting already started such cluster (hard to know w/o investing that time) will fallthrough the same path and reconfigure cluster
 		if config.IsHA(cfg) || !driver.IsVM(cfg.Driver) {
@@ -954,7 +954,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 		}
 		files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640"))
 	}
-	// deploy kube-vip for ha cluster
+	// deploy kube-vip for ha (multi-control plane) cluster
 	if config.IsHA(cfg) {
 		// workaround for kube-vip
 		// only applicable for k8s v1.29+ during primary control-plane node's kubeadm init (ie, first boot)
@@ -1043,7 +1043,7 @@ func (k *Bootstrapper) LabelAndUntaintNode(cfg config.ClusterConfig, n config.No
 	return k.labelAndUntaintNode(cfg, n)
 }
 
-// labelAndUntaintNode applies minikube labels to node and removes NoSchedule taints that might be set to secondary control-plane nodes by default in ha cluster.
+// labelAndUntaintNode applies minikube labels to node and removes NoSchedule taints that might be set to secondary control-plane nodes by default in ha (multi-control plane) cluster.
 func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.Node) error {
 	// time node was created. time format is based on ISO 8601 (RFC 3339)
 	// converting - and : to _ because of Kubernetes label restriction
@@ -247,7 +247,7 @@ func MultiNode(cc ClusterConfig) bool {
 	return viper.GetInt("nodes") > 1
 }
 
-// IsHA returns true if HA is requested.
+// IsHA returns true if ha (multi-control plane) cluster is requested.
 func IsHA(cc ClusterConfig) bool {
 	if len(ControlPlanes(cc)) > 1 {
 		return true
@@ -90,7 +90,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos
 		return h, errors.Wrap(err, "post-start")
 	}
 
-	// on vm node restart and for ha topology only (for now),
+	// on vm node restart and for ha (multi-control plane) topology only (for now),
 	// we deliberately aim to restore backed up machine config early,
 	// so that remaining code logic can amend files as needed,
 	// it's intentionally non-fatal in case of any error
@@ -153,7 +153,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 			out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP")
 		}
 	}
 	// scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled
-	// scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled
+	// scale down CoreDNS from default 2 to 1 replica only for non-ha (non-multi-control plane) cluster and if optimisation is not disabled
 	if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) {
 		if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
 			klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
@@ -167,7 +167,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 		return nil, errors.Wrap(err, "Failed to get bootstrapper")
 	}
 
-	// for ha, use already running control-plane node to copy over certs to this secondary control-plane node
+	// for ha (multi-control plane) cluster, use already running control-plane node to copy over certs to this secondary control-plane node
 	cpr := mustload.Running(starter.Cfg.Name).CP.Runner
 	if err = bs.SetupCerts(*starter.Cfg, *starter.Node, cpr); err != nil {
 		return nil, errors.Wrap(err, "setting up certs")
@@ -178,7 +178,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 	}
 
 	// join cluster only on first node start
-	// except for vm driver in non-ha cluster - fallback to old behaviour
+	// except for vm driver in non-ha (non-multi-control plane) cluster - fallback to old behaviour
 	if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.IsHA(*starter.Cfg)) {
 		// make sure to use the command runner for the primary control plane to generate the join token
 		pcpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
@@ -227,9 +227,9 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 		prepareNone()
 	}
 
-	// for ha cluster, primary control-plane node will not come up alone until secondary joins
+	// for ha (multi-control plane) cluster, primary control-plane node will not come up alone until secondary joins
 	if config.IsHA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) {
-		klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node)
+		klog.Infof("HA (multi-control plane) cluster: will skip waiting for primary control-plane node %+v", starter.Node)
 	} else {
 		klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node)
 		if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
@@ -35,10 +35,10 @@ import (
 	"k8s.io/minikube/pkg/util/retry"
 )
 
-// TestHA tests all ha cluster functionality
+// TestHA tests all ha (multi-control plane) cluster functionality
 func TestHA(t *testing.T) {
 	if NoneDriver() {
-		t.Skip("none driver does not support multinode/ha")
+		t.Skip("none driver does not support multinode/ha(multi-control plane) cluster")
 	}
 
 	if DockerDriver() {
@@ -94,13 +94,13 @@ func TestHA(t *testing.T) {
 		})
 	}
 
-// validateHAStartCluster ensures ha cluster can start.
+// validateHAStartCluster ensures ha (multi-control plane) cluster can start.
 func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) {
-	// start ha cluster
+	// start ha (multi-control plane) cluster
 	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--ha", "-v=7", "--alsologtostderr"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
-		t.Fatalf("failed to fresh-start ha cluster. args %q : %v", rr.Command(), err)
+		t.Fatalf("failed to fresh-start ha (multi-control plane) cluster. args %q : %v", rr.Command(), err)
 	}
 
 	// ensure minikube status shows 3 operational control-plane nodes
@@ -122,17 +122,17 @@ func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) {
 	}
 }
 
-// validateHADeployApp deploys an app to ha cluster and ensures all nodes can serve traffic.
+// validateHADeployApp deploys an app to ha (multi-control plane) cluster and ensures all nodes can serve traffic.
 func validateHADeployApp(ctx context.Context, t *testing.T, profile string) {
 	// Create a deployment for app
 	_, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/ha/ha-pod-dns-test.yaml"))
 	if err != nil {
-		t.Errorf("failed to create busybox deployment to ha cluster")
+		t.Errorf("failed to create busybox deployment to ha (multi-control plane) cluster")
 	}
 
 	_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/busybox"))
 	if err != nil {
-		t.Errorf("failed to deploy busybox to ha cluster")
+		t.Errorf("failed to deploy busybox to ha (multi-control plane) cluster")
 	}
 
 	// resolve Pod IPs
@@ -221,13 +221,13 @@ func validateHAPingHostFromPods(ctx context.Context, t *testing.T, profile strin
 	}
 }
 
-// validateHAAddWorkerNode uses the minikube node add command to add a worker node to an existing ha cluster.
+// validateHAAddWorkerNode uses the minikube node add command to add a worker node to an existing ha (multi-control plane) cluster.
 func validateHAAddWorkerNode(ctx context.Context, t *testing.T, profile string) {
-	// add a node to the current ha cluster
+	// add a node to the current ha (multi-control plane) cluster
 	addArgs := []string{"node", "add", "-p", profile, "-v=7", "--alsologtostderr"}
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
 	if err != nil {
-		t.Fatalf("failed to add worker node to current ha cluster. args %q : %v", rr.Command(), err)
+		t.Fatalf("failed to add worker node to current ha (multi-control plane) cluster. args %q : %v", rr.Command(), err)
 	}
 
 	// ensure minikube status shows 3 operational control-plane nodes and 1 worker node
@@ -276,7 +276,7 @@ func validateHANodeLabels(ctx context.Context, t *testing.T, profile string) {
 	}
 }
 
-// validateHAStatusHAppy ensures minikube profile list outputs correct with ha clusters.
+// validateHAStatusHAppy ensures minikube profile list outputs correct with ha (multi-control plane) clusters.
 func validateHAStatusHAppy(ctx context.Context, t *testing.T, profile string) {
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
 	if err != nil {
@@ -317,7 +317,7 @@ func validateHAStatusHAppy(ctx context.Context, t *testing.T, profile string) {
 	}
 }
 
-// validateHACopyFile ensures minikube cp works with ha clusters.
+// validateHACopyFile ensures minikube cp works with ha (multi-control plane) clusters.
 func validateHACopyFile(ctx context.Context, t *testing.T, profile string) {
 	if NoneDriver() {
 		t.Skipf("skipping: cp is unsupported by none driver")
@@ -357,7 +357,7 @@ func validateHACopyFile(ctx context.Context, t *testing.T, profile string) {
 	}
 }
 
-// validateHAStopSecondaryNode tests ha cluster by stopping a secondary control-plane node using minikube node stop command.
+// validateHAStopSecondaryNode tests ha (multi-control plane) cluster by stopping a secondary control-plane node using minikube node stop command.
 func validateHAStopSecondaryNode(ctx context.Context, t *testing.T, profile string) {
 	// run minikube node stop on secondary control-plane node
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", SecondNodeName, "-v=7", "--alsologtostderr"))
@@ -385,7 +385,7 @@ func validateHAStopSecondaryNode(ctx context.Context, t *testing.T, profile stri
 	}
 }
 
-// validateHAStatusDegraded ensures minikube profile list outputs correct with ha clusters.
+// validateHAStatusDegraded ensures minikube profile list outputs correct with ha (multi-control plane) clusters.
 func validateHAStatusDegraded(ctx context.Context, t *testing.T, profile string) {
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
 	if err != nil {
@@ -423,7 +423,7 @@ func validateHARestartSecondaryNode(ctx context.Context, t *testing.T, profile s
 		t.Errorf("secondary control-plane node start returned an error. args %q: %v", rr.Command(), err)
 	}
 
-	// ensure minikube status shows all 4 nodes running, waiting for ha cluster/apiservers to stabilise
+	// ensure minikube status shows all 4 nodes running, waiting for ha (multi-control plane) cluster/apiservers to stabilise
 	minikubeStatus := func() error {
 		rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr"))
 		return err
@@ -525,7 +525,7 @@ func validateHADeleteSecondaryNode(ctx context.Context, t *testing.T, profile st
 	}
 }
 
-// validateHAStopCluster runs minikube stop on a ha cluster.
+// validateHAStopCluster runs minikube stop on a ha (multi-control plane) cluster.
 func validateHAStopCluster(ctx context.Context, t *testing.T, profile string) {
 	// Run minikube stop on the cluster
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop", "-v=7", "--alsologtostderr"))
@@ -553,7 +553,7 @@ func validateHAStopCluster(ctx context.Context, t *testing.T, profile string) {
 	}
 }
 
-// validateHARestartCluster verifies a soft restart on a ha cluster works.
+// validateHARestartCluster verifies a soft restart on a ha (multi-control plane) cluster works.
 func validateHARestartCluster(ctx context.Context, t *testing.T, profile string) {
 	// restart cluster with minikube start
 	startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=7", "--alsologtostderr"}, StartArgs()...)
@@ -598,13 +598,13 @@ func validateHARestartCluster(ctx context.Context, t *testing.T, profile string)
 	}
 }
 
-// validateHAAddSecondaryNode uses the minikube node add command to add a secondary control-plane node to an existing ha cluster.
+// validateHAAddSecondaryNode uses the minikube node add command to add a secondary control-plane node to an existing ha (multi-control plane) cluster.
 func validateHAAddSecondaryNode(ctx context.Context, t *testing.T, profile string) {
-	// add a node to the current ha cluster
+	// add a node to the current ha (multi-control plane) cluster
 	addArgs := []string{"node", "add", "-p", profile, "--control-plane", "-v=7", "--alsologtostderr"}
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
 	if err != nil {
-		t.Fatalf("failed to add control-plane node to current ha cluster. args %q : %v", rr.Command(), err)
+		t.Fatalf("failed to add control-plane node to current ha (multi-control plane) cluster. args %q : %v", rr.Command(), err)
 	}
 
 	// ensure minikube status shows 3 operational control-plane nodes and 1 worker node