make sure machine name is properly backwards compatible

pull/6836/head
Sharif Elgamal 2020-03-03 13:49:01 -08:00
parent 95dd21d115
commit 3bb111a6fc
No known key found for this signature in database
GPG Key ID: 23CC0225BD9FD702
34 changed files with 81 additions and 71 deletions

View File

@ -78,7 +78,7 @@ var addonsOpenCmd = &cobra.Command{
if err != nil {
exit.WithError("Error getting control plane", err)
}
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp.Name)) {
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
os.Exit(1)
}
addon, ok := assets.Addons[addonName] // validate addon input

View File

@ -80,7 +80,7 @@ var printProfilesTable = func() {
if err != nil {
exit.WithError("error getting primary control plane", err)
}
p.Status, err = machine.GetHostStatus(api, driver.MachineName(*p.Config, cp.Name))
p.Status, err = machine.GetHostStatus(api, driver.MachineName(*p.Config, cp))
if err != nil {
glog.Warningf("error getting host status for %s: %v", p.Name, err)
}
@ -121,7 +121,7 @@ var printProfilesJSON = func() {
if err != nil {
exit.WithError("error getting primary control plane", err)
}
status, err := machine.GetHostStatus(api, driver.MachineName(*v.Config, cp.Name))
status, err := machine.GetHostStatus(api, driver.MachineName(*v.Config, cp))
if err != nil {
glog.Warningf("error getting host status for %s: %v", v.Name, err)
}

View File

@ -87,7 +87,7 @@ var dashboardCmd = &cobra.Command{
exit.WithError("Error getting primary control plane", err)
}
machineName := driver.MachineName(*cc, cp.Name)
machineName := driver.MachineName(*cc, cp)
if _, err = api.Load(machineName); err != nil {
switch err := errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:

View File

@ -35,7 +35,6 @@ import (
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
pkg_config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
@ -96,7 +95,7 @@ func runDelete(cmd *cobra.Command, args []string) {
}
profileFlag := viper.GetString(config.ProfileName)
validProfiles, invalidProfiles, err := pkg_config.ListProfiles()
validProfiles, invalidProfiles, err := config.ListProfiles()
if err != nil {
glog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
}
@ -142,13 +141,13 @@ func runDelete(cmd *cobra.Command, args []string) {
exit.UsageT("usage: minikube delete")
}
profileName := viper.GetString(pkg_config.ProfileName)
profile, err := pkg_config.LoadProfile(profileName)
profileName := viper.GetString(config.ProfileName)
profile, err := config.LoadProfile(profileName)
if err != nil {
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName})
}
errs := DeleteProfiles([]*pkg_config.Profile{profile})
errs := DeleteProfiles([]*config.Profile{profile})
if len(errs) > 0 {
HandleDeletionErrors(errs)
}
@ -169,7 +168,7 @@ func purgeMinikubeDirectory() {
}
// DeleteProfiles deletes one or more profiles
func DeleteProfiles(profiles []*pkg_config.Profile) []error {
func DeleteProfiles(profiles []*config.Profile) []error {
var errs []error
for _, profile := range profiles {
err := deleteProfile(profile)
@ -190,8 +189,8 @@ func DeleteProfiles(profiles []*pkg_config.Profile) []error {
return errs
}
func deleteProfile(profile *pkg_config.Profile) error {
viper.Set(pkg_config.ProfileName, profile.Name)
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
@ -213,14 +212,14 @@ func deleteProfile(profile *pkg_config.Profile) error {
return DeletionError{Err: delErr, Errtype: Fatal}
}
defer api.Close()
cc, err := pkg_config.Load(profile.Name)
if err != nil && !pkg_config.IsNotExist(err) {
cc, err := config.Load(profile.Name)
if err != nil && !config.IsNotExist(err) {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error loading profile config: %v", err))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
if err == nil && driver.BareMetal(cc.Driver) {
if err := uninstallKubernetes(api, profile.Name, *cc, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
if err := uninstallKubernetes(api, *cc, cc.Nodes[0], viper.GetString(cmdcfg.Bootstrapper)); err != nil {
deletionError, ok := err.(DeletionError)
if ok {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
@ -236,7 +235,7 @@ func deleteProfile(profile *pkg_config.Profile) error {
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n.Name)
machineName := driver.MachineName(*cc, n)
if err = machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
@ -251,8 +250,8 @@ func deleteProfile(profile *pkg_config.Profile) error {
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
if err := pkg_config.DeleteProfile(profile.Name); err != nil {
if pkg_config.IsNotExist(err) {
if err := config.DeleteProfile(profile.Name); err != nil {
if config.IsNotExist(err) {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
@ -272,17 +271,17 @@ func deleteContext(machineName string) error {
return DeletionError{Err: fmt.Errorf("update config: %v", err), Errtype: Fatal}
}
if err := cmdcfg.Unset(pkg_config.ProfileName); err != nil {
if err := cmdcfg.Unset(config.ProfileName); err != nil {
return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal}
}
return nil
}
func deleteInvalidProfile(profile *pkg_config.Profile) []error {
func deleteInvalidProfile(profile *config.Profile) []error {
out.T(out.DeletingHost, "Trying to delete invalid profile {{.profile}}", out.V{"profile": profile.Name})
var errs []error
pathToProfile := pkg_config.ProfileFolderPath(profile.Name, localpath.MiniPath())
pathToProfile := config.ProfileFolderPath(profile.Name, localpath.MiniPath())
if _, err := os.Stat(pathToProfile); !os.IsNotExist(err) {
err := os.RemoveAll(pathToProfile)
if err != nil {
@ -304,14 +303,14 @@ func profileDeletionErr(profileName string, additionalInfo string) error {
return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo)
}
func uninstallKubernetes(api libmachine.API, nodeName string, cc pkg_config.ClusterConfig, bsName string) error {
func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error {
out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName})
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, cc, nodeName)
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, cc, n)
if err != nil {
return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal}
}
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(cc, nodeName))
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(cc, n))
if err != nil {
exit.WithError("Error getting host", err)
}

View File

@ -149,7 +149,7 @@ var dockerEnvCmd = &cobra.Command{
exit.WithError("Error getting config", err)
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n.Name)
machineName := driver.MachineName(*cc, n)
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
if err != nil {
exit.WithError("Error getting host", err)

View File

@ -45,7 +45,7 @@ var ipCmd = &cobra.Command{
exit.WithError("Error getting config", err)
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n.Name)
machineName := driver.MachineName(*cc, n)
host, err := api.Load(machineName)
if err != nil {
switch err := errors.Cause(err).(type) {

View File

@ -27,6 +27,7 @@ import (
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/logs"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
)
const (
@ -59,7 +60,12 @@ var logsCmd = &cobra.Command{
nodeName = viper.GetString(config.ProfileName)
}
machineName := driver.MachineName(*cfg, nodeName)
n, _, err := node.Retrieve(cfg, nodeName)
if err != nil {
exit.WithError("Error retrieving node", err)
}
machineName := driver.MachineName(*cfg, *n)
api, err := machine.NewAPIClient()
if err != nil {
@ -75,7 +81,7 @@ var logsCmd = &cobra.Command{
if err != nil {
exit.WithError("command runner", err)
}
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, nodeName)
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, *n)
if err != nil {
exit.WithError("Error getting cluster bootstrapper", err)
}

View File

@ -113,7 +113,7 @@ var mountCmd = &cobra.Command{
if err != nil {
exit.WithError("Error getting primary cp", err)
}
host, err := api.Load(driver.MachineName(*cc, cp.Name))
host, err := api.Load(driver.MachineName(*cc, cp))
if err != nil {
exit.WithError("Error loading api", err)
}

View File

@ -23,6 +23,7 @@ import (
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
)
@ -47,7 +48,12 @@ var nodeStopCmd = &cobra.Command{
exit.WithError("getting config", err)
}
machineName := driver.MachineName(*cc, name)
n, _, err := node.Retrieve(cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
machineName := driver.MachineName(*cc, *n)
err = machine.StopHost(api, machineName)
if err != nil {

View File

@ -66,7 +66,7 @@ func runPause(cmd *cobra.Command, args []string) {
glog.Infof("config: %+v", cc)
for _, n := range cc.Nodes {
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, n.Name))
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, n))
if err != nil {
exit.WithError("Error getting host", err)
}

View File

@ -120,7 +120,7 @@ var podmanEnvCmd = &cobra.Command{
exit.WithError("Error getting config", err)
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n.Name)
machineName := driver.MachineName(*cc, n)
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
if err != nil {
exit.WithError("Error getting host", err)

View File

@ -92,7 +92,7 @@ var serviceCmd = &cobra.Command{
if err != nil {
exit.WithError("Error getting control plane", err)
}
machineName := driver.MachineName(*cfg, cp.Name)
machineName := driver.MachineName(*cfg, cp)
if !machine.IsHostRunning(api, machineName) {
os.Exit(1)
}

View File

@ -56,7 +56,7 @@ var serviceListCmd = &cobra.Command{
if err != nil {
exit.WithError("Error getting primary control plane", err)
}
if !machine.IsHostRunning(api, driver.MachineName(*cfg, cp.Name)) {
if !machine.IsHostRunning(api, driver.MachineName(*cfg, cp)) {
exit.WithCodeT(exit.Unavailable, "profile {{.name}} is not running.", out.V{"name": profileName})
}
serviceURLs, err := service.GetServiceURLs(api, serviceListNamespace, serviceURLTemplate)

View File

@ -54,7 +54,7 @@ var sshCmd = &cobra.Command{
if err != nil {
exit.WithError("Error getting primary control plane", err)
}
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, cp.Name))
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, cp))
if err != nil {
exit.WithError("Error getting host", err)
}
@ -67,7 +67,7 @@ var sshCmd = &cobra.Command{
ssh.SetDefaultClient(ssh.External)
}
err = machine.CreateSSHShell(api, *cc, cp.Name, args)
err = machine.CreateSSHShell(api, *cc, cp, args)
if err != nil {
// This is typically due to a non-zero exit code, so no need for flourish.
out.ErrLn("ssh: %v", err)

View File

@ -510,7 +510,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
exit.WithError("Error getting primary cp", err)
}
machineName := driver.MachineName(*existing, cp.Name)
machineName := driver.MachineName(*existing, cp)
h, err := api.Load(machineName)
if err != nil {
glog.Warningf("selectDriver api.Load: %v", err)

View File

@ -106,7 +106,7 @@ var statusCmd = &cobra.Command{
exit.WithError("getting primary control plane", err)
}
machineName := driver.MachineName(*cc, cp.Name)
machineName := driver.MachineName(*cc, cp)
st, err := status(api, machineName)
if err != nil {
glog.Errorf("status error: %v", err)

View File

@ -59,7 +59,7 @@ func runStop(cmd *cobra.Command, args []string) {
}
for _, n := range cc.Nodes {
nonexistent := stop(api, *cc, n.Name)
nonexistent := stop(api, *cc, n)
if !nonexistent {
out.T(out.Stopped, `"{{.node_name}}" stopped.`, out.V{"node_name": n.Name})
@ -76,7 +76,7 @@ func runStop(cmd *cobra.Command, args []string) {
}
}
func stop(api libmachine.API, cluster config.ClusterConfig, n string) bool {
func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool {
nonexistent := false
stop := func() (err error) {
machineName := driver.MachineName(cluster, n)

View File

@ -57,7 +57,7 @@ var unpauseCmd = &cobra.Command{
glog.Infof("config: %+v", cc)
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n.Name)
machineName := driver.MachineName(*cc, n)
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
if err != nil {
exit.WithError("Error getting host", err)

View File

@ -256,7 +256,7 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
if err != nil {
return errors.Wrap(err, "getting control plane")
}
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp.Name)) {
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", profile, name, val)
return enableOrDisableAddon(name, val, profile)
}

View File

@ -64,8 +64,8 @@ type Bootstrapper struct {
}
// NewBootstrapper creates a new kubeadm.Bootstrapper
func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, nodeName string) (*Bootstrapper, error) {
name := driver.MachineName(cc, nodeName)
func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Bootstrapper, error) {
name := driver.MachineName(cc, n)
h, err := api.Load(name)
if err != nil {
return nil, errors.Wrap(err, "getting api client")

View File

@ -43,12 +43,12 @@ func init() {
}
// Bootstrapper returns a new bootstrapper for the cluster
func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.ClusterConfig, nodeName string) (bootstrapper.Bootstrapper, error) {
func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.ClusterConfig, n config.Node) (bootstrapper.Bootstrapper, error) {
var b bootstrapper.Bootstrapper
var err error
switch bootstrapperName {
case bootstrapper.Kubeadm:
b, err = kubeadm.NewBootstrapper(api, cc, nodeName)
b, err = kubeadm.NewBootstrapper(api, cc, n)
if err != nil {
return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper")
}

View File

@ -215,10 +215,10 @@ func SetLibvirtURI(v string) {
}
// MachineName returns the name of the machine given the cluster config and node
func MachineName(cc config.ClusterConfig, nodeName string) string {
func MachineName(cc config.ClusterConfig, n config.Node) string {
// For a single-node cluster (or the control-plane node), default back to the old naming scheme
if len(cc.Nodes) == 1 {
if len(cc.Nodes) == 1 || n.ControlPlane {
return cc.Name
}
return fmt.Sprintf("%s-%s", cc.Name, nodeName)
return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

View File

@ -174,8 +174,7 @@ func CacheAndLoadImages(images []string) error {
return err
}
for _, n := range c.Nodes {
nodeName := n.Name
m := driver.MachineName(*c, nodeName)
m := driver.MachineName(*c, n)
status, err := GetHostStatus(api, m)
if err != nil {
glog.Warningf("skipping loading cache for profile %s", pName)

View File

@ -320,7 +320,7 @@ func TestStopHost(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
m := driver.MachineName(cc, defaultNodeConfig.Name)
m := driver.MachineName(cc, defaultNodeConfig)
if err := StopHost(api, m); err != nil {
t.Fatalf("Unexpected error stopping machine: %v", err)
}
@ -339,7 +339,7 @@ func TestDeleteHost(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
if err := DeleteHost(api, driver.MachineName(cc, viper.GetString("profile"))); err != nil {
if err := DeleteHost(api, driver.MachineName(cc, defaultNodeConfig)); err != nil {
t.Fatalf("Unexpected error deleting host: %v", err)
}
}
@ -355,7 +355,7 @@ func TestDeleteHostErrorDeletingVM(t *testing.T) {
d := &tests.MockDriver{RemoveError: true, T: t}
h.Driver = d
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, viper.GetString("profile"))); err == nil {
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, defaultNodeConfig)); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@ -368,7 +368,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) {
t.Errorf("createHost failed: %v", err)
}
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, viper.GetString("profile"))); err == nil {
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, defaultNodeConfig)); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@ -383,7 +383,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) {
t.Errorf("createHost failed: %v", err)
}
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, viper.GetString("profile"))); err == nil {
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, defaultNodeConfig)); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@ -395,7 +395,7 @@ func TestGetHostStatus(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
m := driver.MachineName(cc, viper.GetString("profile"))
m := driver.MachineName(cc, defaultNodeConfig)
checkState := func(expected string, machineName string) {
s, err := GetHostStatus(api, machineName)
@ -415,7 +415,7 @@ func TestGetHostStatus(t *testing.T) {
cc.Name = viper.GetString("profile")
m = driver.MachineName(cc, viper.GetString("profile"))
m = driver.MachineName(cc, defaultNodeConfig)
checkState(state.Running.String(), m)
@ -449,7 +449,7 @@ func TestCreateSSHShell(t *testing.T) {
cc.Name = viper.GetString("profile")
cliArgs := []string{"exit"}
if err := CreateSSHShell(api, cc, defaultNodeConfig.Name, cliArgs); err != nil {
if err := CreateSSHShell(api, cc, defaultNodeConfig, cliArgs); err != nil {
t.Fatalf("Error running ssh command: %v", err)
}

View File

@ -63,7 +63,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
glog.Infof("fixHost completed within %s", time.Since(start))
}()
h, err := api.Load(driver.MachineName(cc, n.Name))
h, err := api.Load(driver.MachineName(cc, n))
if err != nil {
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
}

View File

@ -124,8 +124,8 @@ func machineDirs(miniHome ...string) (dirs []string, err error) {
}
// CreateSSHShell creates a new SSH shell / client
func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, nodeName string, args []string) error {
machineName := driver.MachineName(cc, nodeName)
func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string) error {
machineName := driver.MachineName(cc, n)
host, err := CheckIfHostExistsAndLoad(api, machineName)
if err != nil {
return errors.Wrap(err, "host exists and load")

View File

@ -86,7 +86,7 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper {
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, node.Name)
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, node)
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}

View File

@ -71,7 +71,7 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool,
// Delete stops and deletes the given node from the given cluster
func Delete(cc config.ClusterConfig, name string) error {
_, index, err := Retrieve(&cc, name)
n, index, err := Retrieve(&cc, name)
if err != nil {
return err
}
@ -81,7 +81,7 @@ func Delete(cc config.ClusterConfig, name string) error {
return err
}
err = machine.DeleteHost(api, driver.MachineName(cc, name))
err = machine.DeleteHost(api, driver.MachineName(cc, *n))
if err != nil {
return err
}

View File

@ -45,7 +45,7 @@ func init() {
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
MachineName: driver.MachineName(cc, n.Name),
MachineName: driver.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: kic.BaseImage,
CPU: cc.CPUs,

View File

@ -68,7 +68,7 @@ type kvmDriver struct {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
name := driver.MachineName(cc, n.Name)
name := driver.MachineName(cc, n)
return kvmDriver{
BaseDriver: &drivers.BaseDriver{
MachineName: name,

View File

@ -44,7 +44,7 @@ func init() {
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return none.NewDriver(none.Config{
MachineName: driver.MachineName(cc, n.Name),
MachineName: driver.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ContainerRuntime: cc.KubernetesConfig.ContainerRuntime,
}), nil

View File

@ -51,7 +51,7 @@ func init() {
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
MachineName: driver.MachineName(cc, n.Name),
MachineName: driver.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
CPU: cc.CPUs,

View File

@ -50,7 +50,7 @@ func init() {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
d := virtualbox.NewDriver(driver.MachineName(cc, n.Name), localpath.MiniPath())
d := virtualbox.NewDriver(driver.MachineName(cc, n), localpath.MiniPath())
d.Boot2DockerURL = cc.Downloader.GetISOFileURI(cc.MinikubeISO)
d.Memory = cc.Memory
d.CPU = cc.CPUs

View File

@ -40,7 +40,7 @@ func init() {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
d := vmwcfg.NewConfig(driver.MachineName(cc, n.Name), localpath.MiniPath())
d := vmwcfg.NewConfig(driver.MachineName(cc, n), localpath.MiniPath())
d.Boot2DockerURL = cc.Downloader.GetISOFileURI(cc.MinikubeISO)
d.Memory = cc.Memory
d.CPU = cc.CPUs