Migrate MachineName function

Migrate the MachineName function from the driver package to the config package.
pull/10114/head
Daehyeok Mun 2021-01-08 11:26:10 -08:00
parent 857e0a2089
commit 36d94a2d88
38 changed files with 122 additions and 125 deletions
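
The refactor is mechanical at each call site: MachineName keeps its signature and only changes packages, so callers swap driver.MachineName for config.MachineName and adjust their imports. A minimal sketch of the call-site pattern, assuming a checkout of the minikube module; the wrapper function below is hypothetical and exists only to illustrate the swap:

package example

import (
	// before this commit: "k8s.io/minikube/pkg/minikube/driver"
	"k8s.io/minikube/pkg/minikube/config"
)

// machineNameFor is a hypothetical wrapper showing the new call path.
func machineNameFor(cc config.ClusterConfig, n config.Node) string {
	// was: driver.MachineName(cc, n)
	return config.MachineName(cc, n)
}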

@@ -92,7 +92,7 @@ func profileStatus(p *config.Profile, api libmachine.API) string {
exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
}
-host, err := machine.LoadHost(api, driver.MachineName(*p.Config, cp))
+host, err := machine.LoadHost(api, config.MachineName(*p.Config, cp))
if err != nil {
klog.Warningf("error loading profiles: %v", err)
return "Unknown"

@@ -288,7 +288,7 @@ func deleteProfile(profile *config.Profile) error {
if driver.IsKIC(profile.Config.Driver) {
out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
for _, n := range profile.Config.Nodes {
-machineName := driver.MachineName(*profile.Config, n)
+machineName := config.MachineName(*profile.Config, n)
deletePossibleKicLeftOver(machineName, profile.Config.Driver)
}
}
@@ -347,7 +347,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
-machineName := driver.MachineName(*cc, n)
+machineName := config.MachineName(*cc, n)
if err := machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
@@ -412,7 +412,7 @@ func profileDeletionErr(cname string, additionalInfo string) error {
func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error {
out.Step(style.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName})
-host, err := machine.LoadHost(api, driver.MachineName(cc, n))
+host, err := machine.LoadHost(api, config.MachineName(cc, n))
if err != nil {
return DeletionError{Err: fmt.Errorf("unable to load host: %v", err), Errtype: MissingCluster}
}
@@ -500,7 +500,7 @@ func deleteProfileDirectory(profile string) {
func deleteMachineDirectories(cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
-machineName := driver.MachineName(*cc, n)
+machineName := config.MachineName(*cc, n)
deleteProfileDirectory(machineName)
}
}

@@ -18,6 +18,7 @@ package cmd
import (
"github.com/spf13/cobra"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
@@ -46,7 +47,7 @@ var nodeDeleteCmd = &cobra.Command{
}
if driver.IsKIC(co.Config.Driver) {
-machineName := driver.MachineName(*co.Config, *n)
+machineName := config.MachineName(*co.Config, *n)
deletePossibleKicLeftOver(machineName, co.Config.Driver)
}

@@ -22,7 +22,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/klog/v2"
-"k8s.io/minikube/pkg/minikube/driver"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/reason"
@@ -47,7 +47,7 @@ var nodeListCmd = &cobra.Command{
}
for _, n := range cc.Nodes {
-machineName := driver.MachineName(*cc, n)
+machineName := config.MachineName(*cc, n)
fmt.Printf("%s\t%s\n", machineName, n.IP)
}
os.Exit(0)

@@ -21,7 +21,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
-"k8s.io/minikube/pkg/minikube/driver"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
@@ -49,7 +49,7 @@ var nodeStartCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
-machineName := driver.MachineName(*cc, *n)
+machineName := config.MachineName(*cc, *n)
if machine.IsRunning(api, machineName) {
out.Step(style.Check, "{{.name}} is already running", out.V{"name": name})
os.Exit(0)

@@ -18,7 +18,7 @@ package cmd
import (
"github.com/spf13/cobra"
-"k8s.io/minikube/pkg/minikube/driver"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
@@ -45,7 +45,7 @@ var nodeStopCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
-machineName := driver.MachineName(*cc, *n)
+machineName := config.MachineName(*cc, *n)
err = machine.StopHost(api, machineName)
if err != nil {

@@ -24,9 +24,9 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
-"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
@@ -73,7 +73,7 @@ func runPause(cmd *cobra.Command, args []string) {
out.Step(style.Pause, "Pausing node {{.name}} ... ", out.V{"name": name})
-host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n))
+host, err := machine.LoadHost(co.API, config.MachineName(*co.Config, n))
if err != nil {
exit.Error(reason.GuestLoadHost, "Error getting host", err)
}

@@ -20,7 +20,7 @@ import (
"path/filepath"
"github.com/spf13/cobra"
-"k8s.io/minikube/pkg/minikube/driver"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/mustload"
@@ -41,7 +41,7 @@ var sshKeyCmd = &cobra.Command{
exit.Error(reason.GuestNodeRetrieve, "retrieving node", err)
}
-out.Ln(filepath.Join(localpath.MiniPath(), "machines", driver.MachineName(*cc, *n), "id_rsa"))
+out.Ln(filepath.Join(localpath.MiniPath(), "machines", config.MachineName(*cc, *n), "id_rsa"))
},
}

@@ -625,7 +625,7 @@ func hostDriver(existing *config.ClusterConfig) string {
klog.Warningf("Unable to get control plane from existing config: %v", err)
return existing.Driver
}
-machineName := driver.MachineName(*existing, cp)
+machineName := config.MachineName(*existing, cp)
h, err := api.Load(machineName)
if err != nil {
klog.Warningf("api.Load failed for %s: %v", machineName, err)

@@ -237,7 +237,7 @@ func writeStatusesAtInterval(duration time.Duration, api libmachine.API, cc *con
statuses = append(statuses, st)
} else {
for _, n := range cc.Nodes {
-machineName := driver.MachineName(*cc, n)
+machineName := config.MachineName(*cc, n)
klog.Infof("checking status of %s ...", machineName)
st, err := nodeStatus(api, *cc, n)
klog.Infof("%s status: %+v", machineName, st)
@@ -301,7 +301,7 @@ func exitCode(statuses []*Status) int {
// nodeStatus looks up the status of a node
func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
controlPlane := n.ControlPlane
-name := driver.MachineName(cc, n)
+name := config.MachineName(cc, n)
st := &Status{
Name: name,

@@ -28,7 +28,6 @@ import (
"github.com/spf13/viper"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/config"
-"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
@@ -138,7 +137,7 @@ func stopProfile(profile string) int {
defer api.Close()
for _, n := range cc.Nodes {
-machineName := driver.MachineName(*cc, n)
+machineName := config.MachineName(*cc, n)
nonexistent := stop(api, machineName)
if !nonexistent {

@@ -24,9 +24,9 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
+"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
-"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
@@ -71,7 +71,7 @@ var unpauseCmd = &cobra.Command{
out.Step(style.Pause, "Unpausing node {{.name}} ... ", out.V{"name": name})
-machineName := driver.MachineName(*co.Config, n)
+machineName := config.MachineName(*co.Config, n)
host, err := machine.LoadHost(co.API, machineName)
if err != nil {
exit.Error(reason.GuestLoadHost, "Error getting host", err)

@@ -186,7 +186,7 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
exit.Error(reason.GuestCpConfig, "Error getting primary control plane", err)
}
-mName := driver.MachineName(*cc, cp)
+mName := config.MachineName(*cc, cp)
host, err := machine.LoadHost(api, mName)
if err != nil || !machine.IsRunning(api, mName) {
klog.Warningf("%q is not running, setting %s=%v and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
@@ -296,8 +296,8 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
if err != nil {
return errors.Wrap(err, "getting control plane")
}
-if !machine.IsRunning(api, driver.MachineName(*cc, cp)) {
-klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", driver.MachineName(*cc, cp), name, val)
+if !machine.IsRunning(api, config.MachineName(*cc, cp)) {
+klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", config.MachineName(*cc, cp), name, val)
return enableOrDisableAddon(cc, name, val)
}

@@ -123,5 +123,5 @@ func KubeNodeName(cc config.ClusterConfig, n config.Node) string {
hostname, _ := os.Hostname()
return hostname
}
-return driver.MachineName(cc, n)
+return config.MachineName(cc, n)
}

@@ -709,7 +709,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
}()
// Join the master by specifying its token
-joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, driver.MachineName(cc, n))
+joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, config.MachineName(cc, n))
join := func() error {
// reset first to clear any possibly existing state

@@ -27,7 +27,6 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
-"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/machine"
)
@@ -61,7 +60,7 @@ func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, boo
if err != nil {
return nil, nil, errors.Wrap(err, "getting primary control plane")
}
-h, err := machine.LoadHost(mAPI, driver.MachineName(*cc, cp))
+h, err := machine.LoadHost(mAPI, config.MachineName(*cc, cp))
if err != nil {
return nil, nil, errors.Wrap(err, "getting control plane host")
}

@@ -188,3 +188,57 @@ func TestEncode(t *testing.T) {
b.Reset()
}
}
+func TestMachineName(t *testing.T) {
+testsCases := []struct {
+ClusterConfig ClusterConfig
+Want string
+}{
+{
+ClusterConfig: ClusterConfig{Name: "minikube",
+Nodes: []Node{
+{
+Name: "",
+IP: "172.17.0.3",
+Port: 8443,
+KubernetesVersion: "v1.19.2",
+ControlPlane: true,
+Worker: true,
+},
+},
+},
+Want: "minikube",
+},
+{
+ClusterConfig: ClusterConfig{Name: "p2",
+Nodes: []Node{
+{
+Name: "",
+IP: "172.17.0.3",
+Port: 8443,
+KubernetesVersion: "v1.19.2",
+ControlPlane: true,
+Worker: true,
+},
+{
+Name: "m2",
+IP: "172.17.0.4",
+Port: 0,
+KubernetesVersion: "v1.19.2",
+ControlPlane: false,
+Worker: true,
+},
+},
+},
+Want: "p2-m2",
+},
+}
+for _, tc := range testsCases {
+got := MachineName(tc.ClusterConfig, tc.ClusterConfig.Nodes[len(tc.ClusterConfig.Nodes)-1])
+if got != tc.Want {
+t.Errorf("Expected MachineName to be %q but got %q", tc.Want, got)
+}
+}
+}

@@ -18,6 +18,7 @@ package config
import (
"encoding/json"
+"fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -286,3 +287,12 @@ func ProfileFolderPath(profile string, miniHome ...string) string {
}
return filepath.Join(miniPath, "profiles", profile)
}
+// MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
+func MachineName(cc ClusterConfig, n Node) string {
+// For single node cluster, default to back to old naming
+if len(cc.Nodes) == 1 || n.ControlPlane {
+return cc.Name
+}
+return fmt.Sprintf("%s-%s", cc.Name, n.Name)
+}
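
Given that rule, a single-node cluster (or any control-plane node) resolves to the profile name, and only additional workers get the "-<node name>" suffix. A short illustrative snippet, assuming the minikube module is importable; the node values mirror the "p2" case from the test above:

// Illustrative example (not part of the commit): prints the machine names
// for a two-node profile.
package main

import (
	"fmt"

	"k8s.io/minikube/pkg/minikube/config"
)

func main() {
	cc := config.ClusterConfig{
		Name: "p2",
		Nodes: []config.Node{
			{Name: "", ControlPlane: true, Worker: true},
			{Name: "m2", Worker: true},
		},
	}
	fmt.Println(config.MachineName(cc, cc.Nodes[0])) // "p2"    (control-plane node)
	fmt.Println(config.MachineName(cc, cc.Nodes[1])) // "p2-m2" (additional worker)
}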

@@ -26,7 +26,6 @@ import (
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic/oci"
-"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/registry"
)
@@ -305,15 +304,6 @@ func SetLibvirtURI(v string) {
}
-// MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
-func MachineName(cc config.ClusterConfig, n config.Node) string {
-// For single node cluster, default to back to old naming
-if len(cc.Nodes) == 1 || n.ControlPlane {
-return cc.Name
-}
-return fmt.Sprintf("%s-%s", cc.Name, n.Name)
-}
// IndexFromMachineName returns the order of the container based on it is name
func IndexFromMachineName(machineName string) int {
// minikube-m02

@@ -203,60 +203,6 @@ func TestSuggest(t *testing.T) {
}
}
-func TestMachineName(t *testing.T) {
-testsCases := []struct {
-ClusterConfig config.ClusterConfig
-Want string
-}{
-{
-ClusterConfig: config.ClusterConfig{Name: "minikube",
-Nodes: []config.Node{
-{
-Name: "",
-IP: "172.17.0.3",
-Port: 8443,
-KubernetesVersion: "v1.19.2",
-ControlPlane: true,
-Worker: true,
-},
-},
-},
-Want: "minikube",
-},
-{
-ClusterConfig: config.ClusterConfig{Name: "p2",
-Nodes: []config.Node{
-{
-Name: "",
-IP: "172.17.0.3",
-Port: 8443,
-KubernetesVersion: "v1.19.2",
-ControlPlane: true,
-Worker: true,
-},
-{
-Name: "m2",
-IP: "172.17.0.4",
-Port: 0,
-KubernetesVersion: "v1.19.2",
-ControlPlane: false,
-Worker: true,
-},
-},
-},
-Want: "p2-m2",
-},
-}
-for _, tc := range testsCases {
-got := MachineName(tc.ClusterConfig, tc.ClusterConfig.Nodes[len(tc.ClusterConfig.Nodes)-1])
-if got != tc.Want {
-t.Errorf("Expected MachineName to be %q but got %q", tc.Want, got)
-}
-}
-}
func TestIndexFromMachineName(t *testing.T) {
testCases := []struct {
Name string
@@ -352,7 +298,7 @@ func TestIndexFromMachineNameClusterConfig(t *testing.T) {
}
for _, tc := range testsCases {
-got := IndexFromMachineName(MachineName(tc.ClusterConfig, tc.ClusterConfig.Nodes[len(tc.ClusterConfig.Nodes)-1]))
+got := IndexFromMachineName(config.MachineName(tc.ClusterConfig, tc.ClusterConfig.Nodes[len(tc.ClusterConfig.Nodes)-1]))
if got != tc.Want {
t.Errorf("expected IndexFromMachineName to be %d but got %d", tc.Want, got)
}

@@ -36,7 +36,6 @@ import (
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
-"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/vmpath"
@@ -194,7 +193,7 @@ func CacheAndLoadImages(images []string) error {
}
for _, n := range c.Nodes {
-m := driver.MachineName(*c, n)
+m := config.MachineName(*c, n)
status, err := Status(api, m)
if err != nil {

@@ -342,7 +342,7 @@ func TestStopHost(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
-m := driver.MachineName(cc, config.Node{Name: "minikube"})
+m := config.MachineName(cc, config.Node{Name: "minikube"})
if err := StopHost(api, m); err != nil {
t.Fatalf("Unexpected error stopping machine: %v", err)
}
@@ -364,7 +364,7 @@ func TestDeleteHost(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
-if err := DeleteHost(api, driver.MachineName(cc, config.Node{Name: "minikube"}), false); err != nil {
+if err := DeleteHost(api, config.MachineName(cc, config.Node{Name: "minikube"}), false); err != nil {
t.Fatalf("Unexpected error deleting host: %v", err)
}
}
@@ -383,7 +383,7 @@ func TestDeleteHostErrorDeletingVM(t *testing.T) {
d := &tests.MockDriver{RemoveError: true, T: t}
h.Driver = d
-if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
+if err := DeleteHost(api, config.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@@ -399,7 +399,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) {
t.Errorf("createHost failed: %v", err)
}
-if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
+if err := DeleteHost(api, config.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@@ -417,7 +417,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) {
t.Errorf("createHost failed: %v", err)
}
-if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
+if err := DeleteHost(api, config.MachineName(defaultClusterConfig, config.Node{Name: "minikube"}), false); err == nil {
t.Fatal("Expected error deleting host.")
}
}
@@ -432,7 +432,7 @@ func TestStatus(t *testing.T) {
cc := defaultClusterConfig
cc.Name = viper.GetString("profile")
-m := driver.MachineName(cc, config.Node{Name: "minikube"})
+m := config.MachineName(cc, config.Node{Name: "minikube"})
checkState := func(expected string, machineName string) {
s, err := Status(api, machineName)
@@ -452,7 +452,7 @@ func TestStatus(t *testing.T) {
cc.Name = viper.GetString("profile")
-m = driver.MachineName(cc, config.Node{Name: "minikube"})
+m = config.MachineName(cc, config.Node{Name: "minikube"})
checkState(state.Running.String(), m)

@@ -120,7 +120,7 @@ func delete(api libmachine.API, h *host.Host, machineName string) error {
// demolish destroys a host by any means necessary - use only if state is inconsistent
func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) {
-machineName := driver.MachineName(cc, n)
+machineName := config.MachineName(cc, n)
klog.Infof("DEMOLISHING %s ...", machineName)
// This will probably fail

@@ -56,7 +56,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos
klog.Infof("fixHost completed within %s", time.Since(start))
}()
-h, err := api.Load(driver.MachineName(*cc, *n))
+h, err := api.Load(config.MachineName(*cc, *n))
if err != nil {
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
}
@@ -99,7 +99,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos
}
func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.Node, h *host.Host) (*host.Host, error) {
-machineName := driver.MachineName(*cc, *n)
+machineName := config.MachineName(*cc, *n)
machineType := driver.MachineType(cc.Driver)
recreated := false
s, serr := h.Driver.GetState()

@@ -26,11 +26,10 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/config"
-"k8s.io/minikube/pkg/minikube/driver"
)
func getHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
-machineName := driver.MachineName(cc, n)
+machineName := config.MachineName(cc, n)
host, err := LoadHost(api, machineName)
if err != nil {
return nil, errors.Wrap(err, "host exists and load")

@@ -68,7 +68,7 @@ var requiredDirectories = []string{
// StartHost starts a host VM.
func StartHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (*host.Host, bool, error) {
-machineName := driver.MachineName(*cfg, *n)
+machineName := config.MachineName(*cfg, *n)
// Prevent machine-driver boot races, as well as our own certificate race
releaser, err := acquireMachinesLock(machineName, cfg.Driver)

@@ -90,7 +90,7 @@ func Running(name string) ClusterController {
exit.Error(reason.GuestCpConfig, "Unable to find control plane", err)
}
-machineName := driver.MachineName(*cc, cp)
+machineName := config.MachineName(*cc, cp)
hs, err := machine.Status(api, machineName)
if err != nil {
exit.Error(reason.GuestStatus, "Unable to get machine status", err)

@@ -67,7 +67,7 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
return n, errors.Wrap(err, "retrieve")
}
-m := driver.MachineName(cc, *n)
+m := config.MachineName(cc, *n)
api, err := machine.NewAPIClient()
if err != nil {
return n, err
@@ -125,7 +125,7 @@ func Retrieve(cc config.ClusterConfig, name string) (*config.Node, int, error) {
}
// Accept full machine name as well as just node name
-if driver.MachineName(cc, n) == name {
+if config.MachineName(cc, n) == name {
klog.Infof("Couldn't find node name %s, but found it as a machine name, returning it anyway.", name)
return &n, i, nil
}

@@ -210,7 +210,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
// Provision provisions the machine/container for the node
func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) {
register.Reg.SetStep(register.StartingNode)
-name := driver.MachineName(*cc, *n)
+name := config.MachineName(*cc, *n)
if apiServer {
out.Step(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
} else {
@@ -377,7 +377,7 @@ func startHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node, del
klog.Warningf("error starting host: %v", err)
// NOTE: People get very cranky if you delete their prexisting VM. Only delete new ones.
if !exists {
-err := machine.DeleteHost(api, driver.MachineName(*cc, *n))
+err := machine.DeleteHost(api, config.MachineName(*cc, *n))
if err != nil {
klog.Warningf("delete host: %v", err)
}
@@ -396,7 +396,7 @@ func startHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node, del
if delOnFail {
klog.Info("Deleting existing host since delete-on-failure was set.")
// Delete the failed existing host
-err := machine.DeleteHost(api, driver.MachineName(*cc, *n))
+err := machine.DeleteHost(api, config.MachineName(*cc, *n))
if err != nil {
klog.Warningf("delete host: %v", err)
}

@@ -68,7 +68,7 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
ClusterName: cc.Name,
-MachineName: driver.MachineName(cc, n),
+MachineName: config.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: cc.KicBaseImage,
Mounts: mounts,

@@ -66,7 +66,7 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
return &hyperkit.Driver{
BaseDriver: &drivers.BaseDriver{
-MachineName: driver.MachineName(cfg, n),
+MachineName: config.MachineName(cfg, n),
StorePath: localpath.MiniPath(),
SSHUser: "docker",
},

@@ -54,7 +54,7 @@ func init() {
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
-d := hyperv.NewDriver(driver.MachineName(cfg, n), localpath.MiniPath())
+d := hyperv.NewDriver(config.MachineName(cfg, n), localpath.MiniPath())
d.Boot2DockerURL = download.LocalISOResource(cfg.MinikubeISO)
d.VSwitch = cfg.HypervVirtualSwitch
if d.VSwitch == "" && cfg.HypervUseExternalSwitch {

@@ -70,7 +70,7 @@ type kvmDriver struct {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
-name := driver.MachineName(cc, n)
+name := config.MachineName(cc, n)
return kvmDriver{
BaseDriver: &drivers.BaseDriver{
MachineName: name,

@@ -45,7 +45,7 @@ func init() {
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return none.NewDriver(none.Config{
-MachineName: driver.MachineName(cc, n),
+MachineName: config.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ContainerRuntime: cc.KubernetesConfig.ContainerRuntime,
}), nil

@@ -46,7 +46,7 @@ func init() {
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
-d := parallels.NewDriver(driver.MachineName(cfg, n), localpath.MiniPath()).(*parallels.Driver)
+d := parallels.NewDriver(config.MachineName(cfg, n), localpath.MiniPath()).(*parallels.Driver)
d.Boot2DockerURL = download.LocalISOResource(cfg.MinikubeISO)
d.Memory = cfg.Memory
d.CPU = cfg.CPUs

@@ -77,7 +77,7 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
return kic.NewDriver(kic.Config{
ClusterName: cc.Name,
-MachineName: driver.MachineName(cc, n),
+MachineName: config.MachineName(cc, n),
StorePath: localpath.MiniPath(),
ImageDigest: strings.Split(cc.KicBaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
Mounts: mounts,

@@ -52,7 +52,7 @@ func init() {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
-d := virtualbox.NewDriver(driver.MachineName(cc, n), localpath.MiniPath())
+d := virtualbox.NewDriver(config.MachineName(cc, n), localpath.MiniPath())
d.Boot2DockerURL = download.LocalISOResource(cc.MinikubeISO)
d.Memory = cc.Memory
d.CPU = cc.CPUs

@@ -41,7 +41,7 @@ func init() {
}
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
-d := vmwcfg.NewConfig(driver.MachineName(cc, n), localpath.MiniPath())
+d := vmwcfg.NewConfig(config.MachineName(cc, n), localpath.MiniPath())
d.Boot2DockerURL = download.LocalISOResource(cc.MinikubeISO)
d.Memory = cc.Memory
d.CPU = cc.CPUs