spowelljr: rename HA() to IsHA()
parent 2ec34729ad
commit 3a0ada1f6d
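Note: the rename is purely mechanical; every call site below swaps config.HA for config.IsHA with identical arguments and behavior. The new name follows the usual Go convention that boolean predicates read as assertions (IsX, HasX), matching the existing config.IsPrimaryControlPlane. Since all callers live in this repository, no compatibility shim is needed; for an exported API with outside consumers, a deprecated wrapper is the common alternative, sketched here with a trimmed stand-in type rather than minikube's real ClusterConfig:

package config

// ClusterConfig is a trimmed stand-in for this sketch; minikube's real
// type carries far more fields.
type ClusterConfig struct{ ControlPlaneCount int }

// IsHA reports whether the cluster is highly available.
func IsHA(cc ClusterConfig) bool { return cc.ControlPlaneCount > 1 }

// HA reports whether the cluster is highly available.
//
// Deprecated: use IsHA instead.
func HA(cc ClusterConfig) bool { return IsHA(cc) }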
@@ -175,7 +175,7 @@ func profileStatus(p *config.Profile, api libmachine.API) string {
 		healthyCPs++
 	}
 
-	if config.HA(*p.Config) {
+	if config.IsHA(*p.Config) {
 		switch {
 		case healthyCPs < 2:
 			return state.Stopped.String()
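A note on the threshold above: healthyCPs < 2 presumably mirrors etcd's majority requirement, so an HA cluster (three control planes by default) that drops below two healthy control planes is reported as Stopped. A small illustrative sketch of the quorum arithmetic (not minikube code):

package main

import "fmt"

// quorum returns the minimum number of healthy members a cluster of n
// voting members (such as etcd) needs to keep accepting writes.
func quorum(n int) int {
	return n/2 + 1
}

func main() {
	for _, n := range []int{1, 3, 5} {
		fmt.Printf("%d control planes -> need %d healthy\n", n, quorum(n))
	}
	// 3 control planes -> need 2 healthy: below that, the status check
	// above reports state.Stopped.
}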
@@ -204,7 +204,7 @@ func profilesToTableData(profiles []*config.Profile) [][]string {
 	for _, p := range profiles {
 		cpIP := p.Config.KubernetesConfig.APIServerHAVIP
 		cpPort := p.Config.APIServerPort
-		if !config.HA(*p.Config) {
+		if !config.IsHA(*p.Config) {
 			cp, err := config.ControlPlane(*p.Config)
 			if err != nil {
 				exit.Error(reason.GuestCpConfig, "error getting control-plane node", err)
@@ -50,7 +50,7 @@ var nodeAddCmd = &cobra.Command{
 			out.FailureT("none driver does not support multi-node clusters")
 		}
 
-		if cpNode && !config.HA(*cc) {
+		if cpNode && !config.IsHA(*cc) {
 			out.FailureT("Adding a control-plane node to a non-HA cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.")
 		}
 
@@ -403,10 +403,9 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St
 	if cc.Addons["auto-pause"] {
 		hostname, _, port, err = driver.AutoPauseProxyEndpoint(&cc, &n, host.DriverName)
 	} else {
-		if config.HA(cc) {
+		if config.IsHA(cc) {
 			hostname = cc.KubernetesConfig.APIServerHAVIP
 			port = cc.APIServerPort
-			err = nil // checked below
 		} else {
 			hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName)
 		}
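The pattern in this hunk recurs throughout the commit: an HA cluster is reached through the shared kube-vip virtual IP (APIServerHAVIP) and the cluster-wide APIServerPort, while a non-HA cluster uses the single control plane's own endpoint. A condensed sketch of that selection, with simplified stand-in parameters rather than minikube's types:

package main

import "fmt"

// endpoint picks the API-server address a client should use: the shared
// VIP for an HA cluster, otherwise the lone control plane's host and port.
func endpoint(isHA bool, vip string, vipPort int, nodeHost string, nodePort int) (string, int) {
	if isHA {
		return vip, vipPort
	}
	return nodeHost, nodePort
}

func main() {
	host, port := endpoint(true, "192.168.49.254", 8443, "192.168.49.2", 8443)
	fmt.Printf("connect to %s:%d\n", host, port) // the VIP, not any one node
}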
@@ -268,7 +268,7 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, shared shared
 	for _, n := range config.ControlPlanes(cfg) {
 		apiServerIPs = append(apiServerIPs, net.ParseIP(n.IP))
 	}
-	if config.HA(cfg) {
+	if config.IsHA(cfg) {
 		apiServerIPs = append(apiServerIPs, net.ParseIP(cfg.KubernetesConfig.APIServerHAVIP))
 	}
 
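Context for this hunk: apiServerIPs feeds the IP subject alternative names of the API server's serving certificate, so on an HA cluster the VIP has to be included or TLS verification would fail for clients dialing https://VIP:port. A self-contained sketch of assembling such a SAN list (the addresses are illustrative):

package main

import (
	"crypto/x509"
	"fmt"
	"net"
)

func main() {
	// Every control-plane node IP plus the shared VIP must appear among
	// the serving certificate's IP SANs.
	cpIPs := []string{"192.168.49.2", "192.168.49.3", "192.168.49.4"}
	vip := "192.168.49.254"

	var sans []net.IP
	for _, ip := range cpIPs {
		sans = append(sans, net.ParseIP(ip))
	}
	sans = append(sans, net.ParseIP(vip))

	// The SAN list goes into the certificate template used for signing.
	tmpl := x509.Certificate{IPAddresses: sans}
	fmt.Println(tmpl.IPAddresses)
}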
@@ -627,7 +627,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
 		// here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha cluster with vm driver
 		// where such cluster needs to be reconfigured b/c of (currently) ephemeral config, but then also,
 		// starting already started such cluster (hard to know w/o investing that time) will fallthrough the same path and reconfigure cluster
-		if config.HA(cfg) || !driver.IsVM(cfg.Driver) {
+		if config.IsHA(cfg) || !driver.IsVM(cfg.Driver) {
			return nil
		}
	} else {
@@ -955,7 +955,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 			files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640"))
 		}
 		// deploy kube-vip for ha cluster
-		if config.HA(cfg) {
+		if config.IsHA(cfg) {
 			// workaround for kube-vip
 			// only applicable for k8s v1.29+ during primary control-plane node's kubeadm init (ie, first boot)
 			// TODO (prezha): remove when fixed upstream - ref: https://github.com/kube-vip/kube-vip/issues/684#issuecomment-1864855405
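Background on the workaround mentioned above, hedged since the diff does not show it: on Kubernetes v1.29+, kubeadm grants admin.conf its cluster-admin rights only after init completes, yet kube-vip must already serve the VIP during init, so minikube points the kube-vip static pod at super-admin.conf on the primary control plane's first boot (see the linked kube-vip issue). The helper below is illustrative, not the verbatim minikube code:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// kubeVIPKubeconfig picks which kubeconfig the kube-vip static pod should
// mount: super-admin.conf during a v1.29+ kubeadm init, because admin.conf
// only becomes usable once init has finished. The version gate and paths
// are assumptions modeled on the linked issue, not this commit.
func kubeVIPKubeconfig(k8sVersion semver.Version, firstBoot bool) string {
	if firstBoot && k8sVersion.GTE(semver.MustParse("1.29.0")) {
		return "/etc/kubernetes/super-admin.conf"
	}
	return "/etc/kubernetes/admin.conf"
}

func main() {
	fmt.Println(kubeVIPKubeconfig(semver.MustParse("1.29.3"), true)) // super-admin.conf
	fmt.Println(kubeVIPKubeconfig(semver.MustParse("1.28.4"), true)) // admin.conf
}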
@@ -998,7 +998,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 	// add "control-plane.minikube.internal" dns alias
 	// note: needs to be called after APIServerHAVIP is set (in startPrimaryControlPlane()) and before kubeadm kicks off
 	cpIP := cfg.KubernetesConfig.APIServerHAVIP
-	if !config.HA(cfg) {
+	if !config.IsHA(cfg) {
 		cp, err := config.ControlPlane(cfg)
 		if err != nil {
 			return errors.Wrap(err, "get control-plane node")
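The alias gives in-cluster components a stable name for the API server: on an HA cluster it resolves to the VIP, otherwise to the lone control plane's IP. The hunk does not show how the mapping is written, so the sketch below only illustrates the resulting hosts-style entry:

package main

import "fmt"

// hostsEntry renders a hosts-file-style line mapping the well-known
// control-plane alias to the chosen IP: the VIP on HA clusters, the lone
// control plane's IP otherwise.
func hostsEntry(cpIP string) string {
	return fmt.Sprintf("%s\tcontrol-plane.minikube.internal", cpIP)
}

func main() {
	fmt.Println(hostsEntry("192.168.49.254")) // HA: resolves to the VIP
	fmt.Println(hostsEntry("192.168.49.2"))   // non-HA: the node's own IP
}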
@@ -247,8 +247,8 @@ func MultiNode(cc ClusterConfig) bool {
 	return viper.GetInt("nodes") > 1
 }
 
-// HA returns true if HA is requested.
-func HA(cc ClusterConfig) bool {
+// IsHA returns true if HA is requested.
+func IsHA(cc ClusterConfig) bool {
 	if len(ControlPlanes(cc)) > 1 {
 		return true
 	}
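The hunk cuts off after the early return, so the rest of IsHA is not visible here. By analogy with MultiNode directly above it, the remainder plausibly falls back to whether HA was requested at start time; the following self-contained sketch models that assumption with stand-in types (minikube reads the request via viper, modeled here as a plain field):

package main

import "fmt"

// Stand-ins for minikube's config types, trimmed for this sketch.
type Node struct{ ControlPlane bool }
type ClusterConfig struct {
	Nodes []Node
	HA    bool // stand-in for the recorded --ha request
}

func controlPlanes(cc ClusterConfig) []Node {
	var cps []Node
	for _, n := range cc.Nodes {
		if n.ControlPlane {
			cps = append(cps, n)
		}
	}
	return cps
}

// isHA mirrors the renamed predicate: an existing cluster with more than
// one control-plane node is HA regardless of flags; otherwise fall back
// to whether HA was requested (an assumed detail, not shown in the hunk).
func isHA(cc ClusterConfig) bool {
	if len(controlPlanes(cc)) > 1 {
		return true
	}
	return cc.HA
}

func main() {
	cc := ClusterConfig{Nodes: []Node{{ControlPlane: true}, {ControlPlane: true}, {ControlPlane: true}, {ControlPlane: false}}}
	fmt.Println(isHA(cc)) // true: three control planes
}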
@@ -94,7 +94,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos
 	// we deliberately aim to restore backed up machine config early,
 	// so that remaining code logic can amend files as needed,
 	// it's intentionally non-fatal in case of any error
-	if driver.IsVM(h.DriverName) && config.HA(*cc) {
+	if driver.IsVM(h.DriverName) && config.IsHA(*cc) {
 		if err := restore(*h); err != nil {
 			klog.Warningf("cannot read backup folder, skipping restore: %v", err)
 		}
@@ -154,7 +154,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 		}
 	}
 	// scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled
-	if !starter.Cfg.DisableOptimizations && !config.HA(*starter.Cfg) {
+	if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) {
 		if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
 			klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
 		}
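kapi.ScaleDeployment itself is not part of this diff; for reference, the same effect can be had standalone with client-go, sketched below. The kubeconfig path, namespace, and deployment name are the usual defaults, assumed rather than taken from this commit:

package main

import (
	"context"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Scale the coredns deployment in kube-system down to one replica,
	// the same effect the ScaleDeployment call in the hunk above has.
	scale := &autoscalingv1.Scale{
		ObjectMeta: metav1.ObjectMeta{Name: "coredns", Namespace: "kube-system"},
		Spec:       autoscalingv1.ScaleSpec{Replicas: 1},
	}
	if _, err := cs.AppsV1().Deployments("kube-system").
		UpdateScale(context.Background(), "coredns", scale, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("coredns scaled to 1 replica")
}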
@@ -179,7 +179,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 
 	// join cluster only on first node start
 	// except for vm driver in non-ha cluster - fallback to old behaviour
-	if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.HA(*starter.Cfg)) {
+	if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.IsHA(*starter.Cfg)) {
 		// make sure to use the command runner for the primary control plane to generate the join token
 		pcpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
 		if err != nil {
@@ -228,7 +228,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 	}
 
 	// for ha cluster, primary control-plane node will not come up alone until secondary joins
-	if config.HA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) {
+	if config.IsHA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) {
 		klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node)
 	} else {
 		klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node)
@@ -278,7 +278,7 @@ func startPrimaryControlPlane(starter Starter, cr cruntime.Manager) (*kubeconfig
 		return nil, nil, fmt.Errorf("node not marked as primary control-plane")
 	}
 
-	if config.HA(*starter.Cfg) {
+	if config.IsHA(*starter.Cfg) {
 		n, err := network.Inspect(starter.Node.IP)
 		if err != nil {
 			return nil, nil, errors.Wrapf(err, "inspect network")
@@ -625,7 +625,7 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node,
 func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings {
 	host := cc.KubernetesConfig.APIServerHAVIP
 	port := cc.APIServerPort
-	if !config.HA(cc) {
+	if !config.IsHA(cc) {
 		var err error
 		if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil {
 			exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)})
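The host/port chosen above becomes the server address written into the generated kubeconfig, so on an HA cluster every kubectl request goes through the VIP and survives the loss of any single control-plane node. A small sketch of the URL formation (addresses illustrative):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// serverURL builds the kubeconfig server address from the pair chosen in
// the hunk above: the shared VIP and cluster API-server port for HA, the
// single control plane's own endpoint otherwise.
func serverURL(host string, port int) string {
	return "https://" + net.JoinHostPort(host, strconv.Itoa(port))
}

func main() {
	fmt.Println(serverURL("192.168.49.254", 8443)) // HA: kube-vip VIP
	fmt.Println(serverURL("192.168.49.2", 8443))   // non-HA: node endpoint
}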