initial machine name refactor

parent d19049b3ef
commit 3b04a8d85e
@@ -35,7 +35,9 @@ import (
 	"github.com/spf13/viper"
 	pkgaddons "k8s.io/minikube/pkg/addons"
 	"k8s.io/minikube/pkg/minikube/assets"
+	"k8s.io/minikube/pkg/minikube/config"
 	pkg_config "k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -80,7 +82,13 @@ var dashboardCmd = &cobra.Command{
 			exit.WithError("Error getting client", err)
 		}
 
-		if _, err = api.Load(cc.Name); err != nil {
+		cp, err := config.PrimaryControlPlane(*cc)
+		if err != nil {
+			exit.WithError("Error getting primary control plane", err)
+		}
+
+		machineName := driver.MachineName(cc.Name, cp.Name)
+		if _, err = api.Load(machineName); err != nil {
 			switch err := errors.Cause(err).(type) {
 			case mcnerror.ErrHostDoesNotExist:
 				exit.WithCodeT(exit.Unavailable, "{{.name}} cluster does not exist", out.V{"name": cc.Name})
@@ -101,7 +109,7 @@ var dashboardCmd = &cobra.Command{
 			exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
 		}
 
-		if !machine.IsHostRunning(api, profileName) {
+		if !machine.IsHostRunning(api, machineName) {
 			os.Exit(1)
 		}
 
@@ -127,7 +135,7 @@ var dashboardCmd = &cobra.Command{
 		}
 
 		out.ErrT(out.Launch, "Launching proxy ...")
-		p, hostPort, err := kubectlProxy(kubectl, cc.Name)
+		p, hostPort, err := kubectlProxy(kubectl, machineName)
 		if err != nil {
 			exit.WithError("kubectl proxy", err)
 		}
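The dashboard hunks above show the pattern this refactor introduces everywhere: resolve the primary control plane from the cluster config, compose the libmachine host name from cluster and node names, and only then call api.Load. Below is a standalone sketch of that pattern; the Node and ClusterConfig structs and primaryControlPlane are simplified stand-ins for minikube's config package, while MachineName is copied from the helper added later in this diff.

package main

import "fmt"

// Stub types standing in for k8s.io/minikube/pkg/minikube/config;
// the real ClusterConfig and Node carry many more fields.
type Node struct {
	Name         string
	ControlPlane bool
}

type ClusterConfig struct {
	Name  string
	Nodes []Node
}

// MachineName mirrors the helper added to pkg/minikube/driver in this commit.
func MachineName(clusterName, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

// primaryControlPlane sketches config.PrimaryControlPlane: pick the first
// control-plane node out of the cluster config.
func primaryControlPlane(cc ClusterConfig) (Node, error) {
	for _, n := range cc.Nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, fmt.Errorf("no control plane found in %q", cc.Name)
}

func main() {
	cc := ClusterConfig{Name: "minikube", Nodes: []Node{{Name: "minikube", ControlPlane: true}}}
	cp, err := primaryControlPlane(cc)
	if err != nil {
		panic(err)
	}
	// The libmachine host is now loaded by this composed name,
	// not by the bare profile name.
	fmt.Println(MachineName(cc.Name, cp.Name)) // minikube-minikube
}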
@@ -112,7 +112,7 @@ func runDelete(cmd *cobra.Command, args []string) {
 	}
 
 	if deleteAll {
-		if profileFlag != constants.DefaultMachineName {
+		if profileFlag != constants.DefaultClusterName {
			exit.UsageT("usage: minikube delete --all")
 		}
 		delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
@@ -220,7 +220,7 @@ func deleteProfile(profile *pkg_config.Profile) error {
 	}
 
 	if err == nil && driver.BareMetal(cc.Driver) {
-		if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
+		if err := uninstallKubernetes(api, profile.Name, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
 			deletionError, ok := err.(DeletionError)
 			if ok {
 				delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
@@ -301,9 +301,9 @@ func profileDeletionErr(profileName string, additionalInfo string) error {
 	return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo)
 }
 
-func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string) error {
+func uninstallKubernetes(api libmachine.API, profile string, nodeName string, kc pkg_config.KubernetesConfig, bsName string) error {
 	out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName})
-	clusterBootstrapper, err := cluster.Bootstrapper(api, bsName)
+	clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, nodeName)
 	if err != nil {
 		return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal}
 	}
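Note that at this stage of the refactor, deleteProfile passes profile.Name as the node name too, so the bootstrapper for the default single-node profile ends up addressing the machine "minikube-minikube", which is exactly the new DefaultMachineName in the constants hunk further down. A minimal check of that composition, reusing MachineName as defined later in this diff:

package main

import "fmt"

// MachineName is copied from the pkg/minikube/driver addition in this commit.
func MachineName(clusterName string, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

func main() {
	// deleteProfile threads profile.Name through as both cluster and node
	// name, so the default profile resolves to "minikube-minikube".
	fmt.Println(MachineName("minikube", "minikube"))
}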
@@ -22,6 +22,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -43,19 +44,22 @@ var ipCmd = &cobra.Command{
 		if err != nil {
 			exit.WithError("Error getting config", err)
 		}
-		host, err := api.Load(cc.Name)
-		if err != nil {
-			switch err := errors.Cause(err).(type) {
-			case mcnerror.ErrHostDoesNotExist:
-				exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name})
-			default:
-				exit.WithError("Error getting host", err)
-			}
-		}
-		ip, err := host.Driver.GetIP()
-		if err != nil {
-			exit.WithError("Error getting IP", err)
-		}
-		out.Ln(ip)
+		for _, n := range cc.Nodes {
+			machineName := driver.MachineName(cc.Name, n.Name)
+			host, err := api.Load(machineName)
+			if err != nil {
+				switch err := errors.Cause(err).(type) {
+				case mcnerror.ErrHostDoesNotExist:
+					exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name})
+				default:
+					exit.WithError("Error getting host", err)
+				}
+			}
+			ip, err := host.Driver.GetIP()
+			if err != nil {
+				exit.WithError("Error getting IP", err)
+			}
+			out.Ln(ip)
+		}
 	},
 }
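With this hunk, minikube ip prints one address per node instead of a single host IP. The same iterate-and-compose shape, reduced to a runnable sketch; ipFor and the ips map are made-up stand-ins for api.Load plus host.Driver.GetIP:

package main

import "fmt"

type Node struct{ Name string }

type ClusterConfig struct {
	Name  string
	Nodes []Node
}

func MachineName(clusterName, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

// ipFor stands in for api.Load(machineName) + host.Driver.GetIP().
func ipFor(machineName string, ips map[string]string) (string, error) {
	ip, ok := ips[machineName]
	if !ok {
		return "", fmt.Errorf("%q host does not exist", machineName)
	}
	return ip, nil
}

func main() {
	cc := ClusterConfig{Name: "minikube", Nodes: []Node{{"minikube"}, {"m02"}}}
	ips := map[string]string{
		"minikube-minikube": "192.168.64.2",
		"minikube-m02":      "192.168.64.3",
	}
	for _, n := range cc.Nodes {
		ip, err := ipFor(MachineName(cc.Name, n.Name), ips)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(ip)
	}
}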
@@ -23,6 +23,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/logs"
 	"k8s.io/minikube/pkg/minikube/machine"
@@ -53,13 +54,19 @@ var logsCmd = &cobra.Command{
 			exit.WithError("Error getting config", err)
 		}
 
+		if nodeName == "" {
+			nodeName = viper.GetString(config.MachineProfile)
+		}
+
+		machineName := driver.MachineName(viper.GetString(config.MachineProfile), nodeName)
+
 		api, err := machine.NewAPIClient()
 		if err != nil {
 			exit.WithError("Error getting client", err)
 		}
 		defer api.Close()
 
-		h, err := api.Load(cfg.Name)
+		h, err := api.Load(machineName)
 		if err != nil {
 			exit.WithError("api load", err)
 		}
@@ -67,7 +74,7 @@ var logsCmd = &cobra.Command{
 		if err != nil {
 			exit.WithError("command runner", err)
 		}
-		bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
+		bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), nodeName)
 		if err != nil {
 			exit.WithError("Error getting cluster bootstrapper", err)
 		}
@@ -99,4 +106,5 @@ func init() {
 	logsCmd.Flags().BoolVarP(&followLogs, "follow", "f", false, "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.")
 	logsCmd.Flags().BoolVar(&showProblems, "problems", false, "Show only log entries which point to known problems")
 	logsCmd.Flags().IntVarP(&numberOfLines, "length", "n", 60, "Number of lines back to go within the log")
+	logsCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to get logs from. Defaults to the primary control plane.")
 }
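One caveat in this hunk: it registers both --length and --node with the shorthand "n", and pflag panics when a shorthand is redefined within a flag set, so one of the two needs to give it up. A standalone sketch of a collision-free registration, keeping the flag names from the hunk (which flag keeps -n is an assumption here):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("logs", pflag.ContinueOnError)

	var numberOfLines int
	var nodeName string

	// Registering both flags with the shorthand "n", as the hunk above does,
	// makes pflag panic at startup with a "unable to redefine 'n' shorthand"
	// error. One way out: keep -n for --length and leave --node long-only.
	fs.IntVarP(&numberOfLines, "length", "n", 60, "Number of lines back to go within the log")
	fs.StringVar(&nodeName, "node", "", "The node to get logs from. Defaults to the primary control plane.")

	if err := fs.Parse([]string{"--node", "m02", "-n", "100"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(nodeName, numberOfLines) // m02 100
}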
@@ -108,7 +108,12 @@ var mountCmd = &cobra.Command{
 		if err != nil {
 			exit.WithError("Error getting config", err)
 		}
-		host, err := api.Load(cc.Name)
+
+		cp, err := config.PrimaryControlPlane(*cc)
+		if err != nil {
+			exit.WithError("Error getting primary cp", err)
+		}
+		host, err := api.Load(driver.MachineName(cc.Name, cp.Name))
 		if err != nil {
 			exit.WithError("Error loading api", err)
 		}
@@ -505,7 +505,12 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
 		return
 	}
 
-	machineName := viper.GetString(config.MachineProfile)
+	cp, err := config.PrimaryControlPlane(*existing)
+	if err != nil {
+		exit.WithError("Error getting primary cp", err)
+	}
+
+	machineName := driver.MachineName(viper.GetString(config.MachineProfile), cp.Name)
 	h, err := api.Load(machineName)
 	if err != nil {
 		glog.Warningf("selectDriver api.Load: %v", err)
@@ -65,8 +65,8 @@ type Bootstrapper struct {
 }
 
 // NewBootstrapper creates a new kubeadm.Bootstrapper
-func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) {
-	name := viper.GetString(config.MachineProfile)
+func NewBootstrapper(api libmachine.API, nodeName string) (*Bootstrapper, error) {
+	name := driver.MachineName(viper.GetString(config.MachineProfile), nodeName)
 	h, err := api.Load(name)
 	if err != nil {
 		return nil, errors.Wrap(err, "getting api client")
@@ -75,7 +75,7 @@ func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "command runner")
 	}
-	return &Bootstrapper{c: runner, contextName: name, k8sClient: nil}, nil
+	return &Bootstrapper{c: runner, contextName: viper.GetString(config.MachineProfile), k8sClient: nil}, nil
 }
 
 // GetKubeletStatus returns the kubelet status
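Worth noting in these two hunks: the libmachine host is now loaded by the composed machine name, but contextName deliberately stays the bare profile name, since that is what names the kubeconfig context. A standalone sketch of the two names diverging; the struct below is a stub of kubeadm.Bootstrapper, which really holds a command runner and a Kubernetes client rather than these strings:

package main

import "fmt"

func MachineName(clusterName, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

// bootstrapper is a stub of kubeadm.Bootstrapper, keeping just the names.
type bootstrapper struct {
	machineName string // what api.Load is called with
	contextName string // what the kubeconfig context is named
}

func newBootstrapper(profile, nodeName string) bootstrapper {
	return bootstrapper{
		machineName: MachineName(profile, nodeName),
		contextName: profile,
	}
}

func main() {
	b := newBootstrapper("minikube", "minikube")
	fmt.Println(b.machineName) // minikube-minikube
	fmt.Println(b.contextName) // minikube
}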
@@ -42,12 +42,12 @@ func init() {
 }
 
 // Bootstrapper returns a new bootstrapper for the cluster
-func Bootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
+func Bootstrapper(api libmachine.API, bootstrapperName string, nodeName string) (bootstrapper.Bootstrapper, error) {
 	var b bootstrapper.Bootstrapper
 	var err error
 	switch bootstrapperName {
 	case bootstrapper.Kubeadm:
-		b, err = kubeadm.NewBootstrapper(api)
+		b, err = kubeadm.NewBootstrapper(api, nodeName)
 		if err != nil {
 			return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper")
 		}
@@ -34,7 +34,9 @@ const (
 	// OldestKubernetesVersion is the oldest Kubernetes version to test against
 	OldestKubernetesVersion = "v1.11.10"
 	// DefaultMachineName is the default name for the VM
-	DefaultMachineName = "minikube"
+	DefaultMachineName = "minikube-minikube"
+	// DefaultClusterName is the default name for the k8s cluster
+	DefaultClusterName = "minikube"
 	// DefaultNodeName is the default name for the kubeadm node within the VM
 	DefaultNodeName = "minikube"
 
@@ -212,3 +212,8 @@ func SetLibvirtURI(v string) {
 	os.Setenv("LIBVIRT_DEFAULT_URI", v)
 
 }
+
+// MachineName returns the name of the machine given the cluster and node names
+func MachineName(clusterName string, nodeName string) string {
+	return fmt.Sprintf("%s-%s", clusterName, nodeName)
+}
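With this helper in place, the DefaultMachineName constant above is exactly MachineName(DefaultClusterName, DefaultNodeName). One property of a plain "%s-%s" join worth keeping in mind: it is not reversible once cluster or node names themselves contain hyphens, so two distinct (cluster, node) pairs can produce the same machine name. A quick standalone check:

package main

import "fmt"

// MachineName is copied verbatim from the hunk above.
func MachineName(clusterName string, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

func main() {
	// The defaults from pkg/minikube/constants compose as expected.
	fmt.Println(MachineName("minikube", "minikube") == "minikube-minikube") // true

	// But hyphenated names collide: distinct pairs, same machine name.
	fmt.Println(MachineName("my-cluster", "m02")) // my-cluster-m02
	fmt.Println(MachineName("my", "cluster-m02")) // my-cluster-m02
}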
@@ -35,6 +35,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/image"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/vmpath"
@@ -138,28 +139,32 @@ func CacheAndLoadImages(images []string) error {
 	}
 	for _, p := range profiles { // loading images to all running profiles
 		pName := p.Name // capture the loop variable
-		status, err := GetHostStatus(api, pName)
-		if err != nil {
-			glog.Warningf("skipping loading cache for profile %s", pName)
-			glog.Errorf("error getting status for %s: %v", pName, err)
-			continue // try next machine
-		}
-		if status == state.Running.String() { // the not running hosts will load on next start
-			h, err := api.Load(pName)
-			if err != nil {
-				return err
-			}
-			cr, err := CommandRunner(h)
-			if err != nil {
-				return err
-			}
-			c, err := config.Load(pName)
-			if err != nil {
-				return err
-			}
-			err = LoadImages(c, cr, images, constants.ImageCacheDir)
-			if err != nil {
-				glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
+		c, err := config.Load(pName)
+		if err != nil {
+			return err
+		}
+		for _, n := range c.Nodes {
+			nodeName := n.Name
+			m := driver.MachineName(pName, nodeName)
+			status, err := GetHostStatus(api, m)
+			if err != nil {
+				glog.Warningf("skipping loading cache for profile %s", pName)
+				glog.Errorf("error getting status for %s: %v", pName, err)
+				continue // try next machine
+			}
+			if status == state.Running.String() { // the not running hosts will load on next start
+				h, err := api.Load(m)
+				if err != nil {
+					return err
+				}
+				cr, err := CommandRunner(h)
+				if err != nil {
+					return err
+				}
+				err = LoadImages(c, cr, images, constants.ImageCacheDir)
+				if err != nil {
+					glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
+				}
 			}
 		}
 	}
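CacheAndLoadImages now loads each profile's config once, then walks every node: a failure to read the config aborts the whole operation, while a per-machine status error still just skips that machine, and images are only pushed into hosts that are running. The shape of that control flow, reduced to a runnable sketch (the statuses map and node names are made up for illustration):

package main

import "fmt"

type Node struct{ Name string }

type ClusterConfig struct {
	Name  string
	Nodes []Node
}

func MachineName(clusterName, nodeName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeName)
}

// loadCachedImages mirrors the new per-node loop: status errors skip the
// machine, and only running machines get images loaded.
func loadCachedImages(c ClusterConfig, statuses map[string]string) {
	for _, n := range c.Nodes {
		m := MachineName(c.Name, n.Name)
		status, ok := statuses[m]
		if !ok {
			fmt.Printf("skipping %s: no status\n", m)
			continue // try next machine
		}
		if status == "Running" { // the not running hosts will load on next start
			fmt.Printf("loading cached images into %s\n", m)
		}
	}
}

func main() {
	c := ClusterConfig{Name: "minikube", Nodes: []Node{{"minikube"}, {"m02"}}}
	loadCachedImages(c, map[string]string{
		"minikube-minikube": "Running",
		"minikube-m02":      "Stopped",
	})
}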
@@ -86,7 +86,7 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
 
 // setupKubeAdm adds any requested files into the VM before Kubernetes is started
 func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper {
-	bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
+	bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), node.Name)
 	if err != nil {
 		exit.WithError("Failed to get bootstrapper", err)
 	}