I think it works?

pull/6787/head
Sharif Elgamal 2020-02-25 16:36:53 -08:00
parent 39f03bc925
commit 0fadf91d2c
18 changed files with 94 additions and 72 deletions

View File

@ -221,21 +221,14 @@ func deleteProfile(profile *pkg_config.Profile) error {
}
if err == nil && driver.BareMetal(cc.Driver) {
var e error
for _, n := range cc.Nodes {
if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), n.Name); err != nil {
deletionError, ok := err.(DeletionError)
if ok {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
deletionError.Err = delErr
e = deletionError
} else {
e = err
}
if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper), cc.Nodes[0].Name); err != nil {
deletionError, ok := err.(DeletionError)
if ok {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
deletionError.Err = delErr
return deletionError
}
}
if e != nil {
return e
return err
}
}
@ -243,13 +236,15 @@ func deleteProfile(profile *pkg_config.Profile) error {
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
}
if err = machine.DeleteHost(api, profile.Name); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
for _, n := range cc.Nodes {
if err = machine.DeleteHost(api, n.Name); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
}
}
}
@ -311,7 +306,7 @@ func profileDeletionErr(profileName string, additionalInfo string) error {
func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string, nodeName string) error {
out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName})
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName)
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, profile, nodeName)
if err != nil {
return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal}
}

View File

@ -67,7 +67,7 @@ var logsCmd = &cobra.Command{
if err != nil {
exit.WithError("command runner", err)
}
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile))
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), viper.GetString(config.MachineProfile), viper.GetString(config.MachineProfile))
if err != nil {
exit.WithError("Error getting cluster bootstrapper", err)
}

View File

@ -55,10 +55,13 @@ const (
// Nonexistent means nonexistent
Nonexistent = "Nonexistent" // ~state.None
// Irrelevant is used for statuses that aren't meaningful for worker nodes
Irrelevant = "Irrelevant"
)
// Status holds string representations of component states
type Status struct {
Name string
Host string
Kubelet string
APIServer string
@ -69,7 +72,8 @@ const (
minikubeNotRunningStatusFlag = 1 << 0
clusterNotRunningStatusFlag = 1 << 1
k8sNotRunningStatusFlag = 1 << 2
defaultStatusFormat = `host: {{.Host}}
defaultStatusFormat = `{{.Name}}
host: {{.Host}}
kubelet: {{.Kubelet}}
apiserver: {{.APIServer}}
kubeconfig: {{.Kubeconfig}}
@ -95,26 +99,35 @@ var statusCmd = &cobra.Command{
}
defer api.Close()
machineName := viper.GetString(config.MachineProfile)
st, err := status(api, machineName)
cluster := viper.GetString(config.MachineProfile)
cc, err := config.Load(cluster)
if err != nil {
glog.Errorf("status error: %v", err)
}
if st.Host == Nonexistent {
glog.Errorf("The %q cluster does not exist!", machineName)
exit.WithError("getting config", err)
}
switch strings.ToLower(output) {
case "text":
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
var st *Status
for _, n := range cc.Nodes {
machineName := fmt.Sprintf("%s-%s", cluster, n.Name)
st, err = status(api, machineName, n.ControlPlane)
if err != nil {
glog.Errorf("status error: %v", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
switch strings.ToLower(output) {
case "text":
if err := statusText(st, os.Stdout); err != nil {
exit.WithError("status text failure", err)
}
case "json":
if err := statusJSON(st, os.Stdout); err != nil {
exit.WithError("status json failure", err)
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
}
os.Exit(exitCode(st))
@ -126,17 +139,22 @@ func exitCode(st *Status) int {
if st.Host != state.Running.String() {
c |= minikubeNotRunningStatusFlag
}
if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() {
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
c |= clusterNotRunningStatusFlag
}
if st.Kubeconfig != Configured {
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
c |= k8sNotRunningStatusFlag
}
return c
}
func status(api libmachine.API, name string) (*Status, error) {
func status(api libmachine.API, name string, controlPlane bool) (*Status, error) {
profile := strings.Split(name, "-")[0]
node := strings.Split(name, "-")[1]
st := &Status{
Name: node,
Host: Nonexistent,
APIServer: Nonexistent,
Kubelet: Nonexistent,
@ -179,10 +197,17 @@ func status(api libmachine.API, name string) (*Status, error) {
}
st.Kubeconfig = Misconfigured
ok, err := kubeconfig.IsClusterInConfig(ip, name)
glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
if ok {
st.Kubeconfig = Configured
if !controlPlane {
st.Kubeconfig = Irrelevant
st.APIServer = Irrelevant
}
if st.Kubeconfig != Irrelevant {
ok, err := kubeconfig.IsClusterInConfig(ip, profile)
glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
if ok {
st.Kubeconfig = Configured
}
}
host, err := machine.CheckIfHostExistsAndLoad(api, name)
@ -205,14 +230,16 @@ func status(api libmachine.API, name string) (*Status, error) {
st.Kubelet = stk.String()
}
sta, err := kverify.APIServerStatus(cr, ip, port)
glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
if st.APIServer != Irrelevant {
sta, err := kverify.APIServerStatus(cr, ip, port)
glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
if err != nil {
glog.Errorln("Error apiserver status:", err)
st.APIServer = state.Error.String()
} else {
st.APIServer = sta.String()
if err != nil {
glog.Errorln("Error apiserver status:", err)
st.APIServer = state.Error.String()
} else {
st.APIServer = sta.String()
}
}
return st, nil

View File

@ -65,7 +65,6 @@ type Driver struct {
UUID string
VpnKitSock string
VSockPorts []string
ClusterName string
}
// NewDriver creates a new driver for a host
@ -200,7 +199,7 @@ func (d *Driver) Restart() error {
}
func (d *Driver) createHost() (*hyperkit.HyperKit, error) {
stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName)
stateDir := filepath.Join(d.StorePath, "machines", d.MachineName)
h, err := hyperkit.New("", d.VpnKitSock, stateDir)
if err != nil {
return nil, errors.Wrap(err, "new-ing Hyperkit")

View File

@ -36,6 +36,7 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/viper"
"k8s.io/client-go/kubernetes"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/minikube/pkg/drivers/kic"
@ -73,7 +74,7 @@ func NewBootstrapper(api libmachine.API, name string) (*Bootstrapper, error) {
if err != nil {
return nil, errors.Wrap(err, "command runner")
}
return &Bootstrapper{c: runner, contextName: name, k8sClient: nil}, nil
return &Bootstrapper{c: runner, contextName: viper.GetString(config.MachineProfile), k8sClient: nil}, nil
}
// GetKubeletStatus returns the kubelet status

View File

@ -42,12 +42,12 @@ func init() {
}
// Bootstrapper returns a new bootstrapper for the cluster
func Bootstrapper(api libmachine.API, bootstrapperName string, machineName string) (bootstrapper.Bootstrapper, error) {
func Bootstrapper(api libmachine.API, bootstrapperName string, cluster string, nodeName string) (bootstrapper.Bootstrapper, error) {
var b bootstrapper.Bootstrapper
var err error
switch bootstrapperName {
case bootstrapper.Kubeadm:
b, err = kubeadm.NewBootstrapper(api, machineName)
b, err = kubeadm.NewBootstrapper(api, fmt.Sprintf("%s-%s", cluster, nodeName))
if err != nil {
return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper")
}

View File

@ -103,7 +103,7 @@ func InitialSetup(cc config.ClusterConfig, n config.Node, existingAddons map[str
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper {
bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name)
bs, err := Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg.Name, n.Name)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}

View File

@ -42,7 +42,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n)
defer mAPI.Close()
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), n.Name)
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, n.Name)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}
@ -87,7 +87,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
if err != nil {
exit.WithError("Getting primary control plane", err)
}
cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cp.Name)
cpBs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cc.Name, cp.Name)
if err != nil {
exit.WithError("Getting bootstrapper", err)
}

View File

@ -45,7 +45,7 @@ func init() {
func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
MachineName: n.Name,
MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name),
StorePath: localpath.MiniPath(),
ImageDigest: kic.BaseImage,
CPU: mc.CPUs,

View File

@ -65,11 +65,10 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
return &hyperkit.Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: cfg.Name,
MachineName: fmt.Sprintf("%s-%s", cfg.Name, n.Name),
StorePath: localpath.MiniPath(),
SSHUser: "docker",
},
ClusterName: cfg.Name,
Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO),
DiskSize: cfg.DiskSize,
Memory: cfg.Memory,

View File

@ -53,7 +53,7 @@ func init() {
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
d := hyperv.NewDriver(n.Name, localpath.MiniPath())
d := hyperv.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath())
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
d.VSwitch = cfg.HypervVirtualSwitch
if d.VSwitch == "" && cfg.HypervUseExternalSwitch {

View File

@ -68,7 +68,7 @@ type kvmDriver struct {
}
func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
name := n.Name
name := fmt.Sprintf("%s-%s", mc.Name, n.Name)
return kvmDriver{
BaseDriver: &drivers.BaseDriver{
MachineName: name,

View File

@ -45,7 +45,7 @@ func init() {
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver)
d := parallels.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*parallels.Driver)
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
d.Memory = cfg.Memory
d.CPU = cfg.CPUs

View File

@ -51,7 +51,7 @@ func init() {
func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
MachineName: n.Name,
MachineName: fmt.Sprintf("%s-%s", mc.Name, n.Name),
StorePath: localpath.MiniPath(),
ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
CPU: mc.CPUs,

View File

@ -50,7 +50,7 @@ func init() {
}
func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
d := virtualbox.NewDriver(n.Name, localpath.MiniPath())
d := virtualbox.NewDriver(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath())
d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
d.Memory = mc.Memory
d.CPU = mc.CPUs

View File

@ -40,7 +40,7 @@ func init() {
}
func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
d := vmwcfg.NewConfig(n.Name, localpath.MiniPath())
d := vmwcfg.NewConfig(fmt.Sprintf("%s-%s", mc.Name, n.Name), localpath.MiniPath())
d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
d.Memory = mc.Memory
d.CPU = mc.CPUs

View File

@ -45,7 +45,7 @@ func init() {
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
d := vmwarefusion.NewDriver(fmt.Sprintf("%s-%s", cfg.Name, n.Name), localpath.MiniPath()).(*vmwarefusion.Driver)
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
d.Memory = cfg.Memory
d.CPU = cfg.CPUs

View File

@ -195,7 +195,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options {
}
func setContainerRuntimeOptions(name string, p miniProvisioner) error {
c, err := config.Load(name)
cluster := strings.Split(name, "-")[0]
c, err := config.Load(cluster)
if err != nil {
return errors.Wrap(err, "getting cluster config")
}