clean up status output and have multinode survive cluster restarts

pull/6787/head
Sharif Elgamal 2020-03-18 21:28:03 -07:00
parent fa97a5bf0d
commit f9b38dc04e
5 changed files with 22 additions and 16 deletions

View File

@@ -350,6 +350,9 @@ func runStart(cmd *cobra.Command, args []string) {
 	kubeconfig := node.Start(cc, n, existingAddons, true)
 
 	numNodes := viper.GetInt(nodes)
+	if numNodes == 1 && existing != nil {
+		numNodes = len(existing.Nodes)
+	}
 	if numNodes > 1 {
 		if driver.BareMetal(driverName) {
 			out.T(out.Meh, "The none driver is not compatible with multi-node clusters.")
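
This hunk is what lets a multi-node cluster survive a restart: when --nodes is left at its default of 1 but a saved cluster config exists, the node count is taken from the saved config so the worker nodes are started again. A minimal standalone sketch of that fallback, using simplified stand-ins for minikube's config types (the ClusterConfig/Node shapes and the pickNodeCount helper below are illustrative, not the real API):

package main

import "fmt"

// Simplified stand-ins for the saved cluster config (illustrative only).
type Node struct{ Name string }
type ClusterConfig struct{ Nodes []Node }

// pickNodeCount mirrors the guard added above: keep the flag value unless it is
// still the default (1) and a previously saved cluster config exists.
func pickNodeCount(flagValue int, existing *ClusterConfig) int {
	if flagValue == 1 && existing != nil {
		return len(existing.Nodes)
	}
	return flagValue
}

func main() {
	saved := &ClusterConfig{Nodes: []Node{{"m01"}, {"m02"}, {"m03"}}}
	fmt.Println(pickNodeCount(1, saved)) // 3: a restart brings back every saved node
	fmt.Println(pickNodeCount(1, nil))   // 1: fresh cluster, the default applies
	fmt.Println(pickNodeCount(4, saved)) // 4: an explicit node count is respected
}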

View File

@@ -67,6 +67,7 @@ type Status struct {
 	Kubelet    string
 	APIServer  string
 	Kubeconfig string
+	Worker     bool
 }
 
 const (
@@ -78,6 +79,12 @@
 host: {{.Host}}
 kubelet: {{.Kubelet}}
 apiserver: {{.APIServer}}
 kubeconfig: {{.Kubeconfig}}
+
 `
+	workerStatusFormat = `{{.Name}}
+host: {{.Host}}
+kubelet: {{.Kubelet}}
+
+`
 )
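
Since a worker node has no API server and no kubeconfig entry of its own, the new workerStatusFormat reports only the name, host and kubelet. A quick sketch of how that text/template renders (the template string is copied from the hunk above; the Status value and its trimmed field list are just example data):

package main

import (
	"os"
	"text/template"
)

// Status carries only the fields this template needs (example shape).
type Status struct {
	Name, Host, Kubelet string
	Worker              bool
}

const workerStatusFormat = `{{.Name}}
host: {{.Host}}
kubelet: {{.Kubelet}}

`

func main() {
	st := Status{Name: "m02", Host: "Running", Kubelet: "Running", Worker: true}
	tmpl := template.Must(template.New("worker-status").Parse(workerStatusFormat))
	// Prints the node name, host and kubelet lines followed by a blank line,
	// matching the trailing newline added to both format constants above.
	if err := tmpl.Execute(os.Stdout, st); err != nil {
		panic(err)
	}
}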
@@ -153,15 +160,7 @@ func exitCode(st *Status) int {
 func status(api libmachine.API, name string, controlPlane bool) (*Status, error) {
-	var profile, node string
-	if strings.Contains(name, "-") {
-		profile = strings.Split(name, "-")[0]
-		node = strings.Split(name, "-")[1]
-	} else {
-		profile = name
-		node = name
-	}
-
+	profile, node := driver.ClusterNameFromMachine(name)
 	st := &Status{
 		Name:       node,
@@ -169,6 +168,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error)
 		APIServer:  Nonexistent,
 		Kubelet:    Nonexistent,
 		Kubeconfig: Nonexistent,
+		Worker:     !controlPlane,
 	}
 
 	hs, err := machine.Status(api, name)
@@ -265,6 +265,9 @@ For the list accessible variables for the template, see the struct values here:
 func statusText(st *Status, w io.Writer) error {
 	tmpl, err := template.New("status").Parse(statusFormat)
+	if st.Worker && statusFormat == defaultStatusFormat {
+		tmpl, err = template.New("worker-status").Parse(workerStatusFormat)
+	}
 	if err != nil {
 		return err
 	}
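
The worker template is only substituted while the format string still equals defaultStatusFormat, so a custom template passed via --format keeps applying to every node, control plane or worker alike. A self-contained sketch of that selection logic (this statusText returns a string instead of writing to an io.Writer, purely to keep the example short):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

type Status struct {
	Name, Host, Kubelet, APIServer, Kubeconfig string
	Worker                                     bool
}

const (
	defaultStatusFormat = "{{.Name}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\n\n"
	workerStatusFormat  = "{{.Name}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\n\n"
)

// statusText mirrors the selection in the hunk above: workers get the shorter
// template, but only if the caller did not override the format string.
func statusText(st *Status, statusFormat string) (string, error) {
	tmpl, err := template.New("status").Parse(statusFormat)
	if st.Worker && statusFormat == defaultStatusFormat {
		tmpl, err = template.New("worker-status").Parse(workerStatusFormat)
	}
	if err != nil {
		return "", err
	}
	var b bytes.Buffer
	if err := tmpl.Execute(&b, st); err != nil {
		return "", err
	}
	return b.String(), nil
}

func main() {
	worker := &Status{Name: "m02", Host: "Running", Kubelet: "Running", Worker: true}

	out, _ := statusText(worker, defaultStatusFormat)
	fmt.Print(out) // name, host and kubelet only

	custom, _ := statusText(worker, "{{.Name}}: {{.Host}}\n")
	fmt.Print(custom) // "m02: Running" -- a custom format still wins for workers
}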

View File

@@ -52,17 +52,17 @@ func TestStatusText(t *testing.T) {
 		{
 			name:  "ok",
 			state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured},
-			want:  "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n",
+			want:  "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n",
 		},
 		{
 			name:  "paused",
 			state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured},
-			want:  "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n",
+			want:  "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n",
 		},
 		{
 			name:  "down",
 			state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured},
-			want:  "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
+			want:  "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
 		},
 	}
 	for _, tc := range tests {

View File

@@ -238,9 +238,9 @@ func MachineName(cc config.ClusterConfig, n config.Node) string {
 }
 
 // ClusterNameFromMachine retrieves the cluster name embedded in the machine name
-func ClusterNameFromMachine(name string) string {
+func ClusterNameFromMachine(name string) (string, string) {
 	if strings.Contains(name, "---") {
-		return strings.Split(name, "---")[0], strings.Split(name, "---")[1]
+		return strings.Split(name, "---")[0], strings.Split(name, "---")[1]
 	}
-	return name
+	return name, name
 }
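
With the second return value, callers such as status() above get both the profile (cluster) name and the node name without re-splitting the machine name themselves; callers that only care about the cluster, like setContainerRuntimeOptions below, discard the node with the blank identifier. A quick illustration of the two paths (the machine name "minikube---m02" is just an example of the "---" separator used above):

package main

import (
	"fmt"
	"strings"
)

// ClusterNameFromMachine is copied from the hunk above: for additional nodes the
// machine name embeds the cluster name and the node name around "---".
func ClusterNameFromMachine(name string) (string, string) {
	if strings.Contains(name, "---") {
		return strings.Split(name, "---")[0], strings.Split(name, "---")[1]
	}
	return name, name
}

func main() {
	cluster, node := ClusterNameFromMachine("minikube---m02")
	fmt.Println(cluster, node) // minikube m02

	cluster, node = ClusterNameFromMachine("minikube")
	fmt.Println(cluster, node) // minikube minikube: a single-node machine is both
}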

View File

@@ -196,7 +196,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options {
 }
 
 func setContainerRuntimeOptions(name string, p miniProvisioner) error {
-	cluster := driver.ClusterNameFromMachine(name)
+	cluster, _ := driver.ClusterNameFromMachine(name)
 	c, err := config.Load(cluster)
 	if err != nil {
 		return errors.Wrap(err, "getting cluster config")