Merge pull request #6836 from sharifelgamal/machinename
Decouple machine name from profile/cluster name
commit 1327f9d3a5
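Note: the hunks below consistently rename the viper key config.MachineProfile to config.ProfileName, and replace bare profile names with per-node machine names obtained from driver.MachineName(cc, node). As a rough sketch of what such a helper presumably looks like — this is an assumption inferred from the call sites below, not code taken from the PR:

package driver

import (
	"fmt"

	"k8s.io/minikube/pkg/minikube/config"
)

// MachineName composes the libmachine host name for one node of a cluster.
// Hypothetical sketch: a single-node cluster keeps the bare profile name for
// backwards compatibility, while additional nodes get a "<profile>-<node>" suffix.
func MachineName(cc config.ClusterConfig, n config.Node) string {
	if len(cc.Nodes) == 1 {
		return cc.Name
	}
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

With that shape, callers that used to hand viper.GetString(config.MachineProfile) straight to libmachine now load the cluster config, pick a node (usually the primary control plane), and pass driver.MachineName(cc, node) instead.
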
@@ -98,7 +98,7 @@ var printAddonsList = func() {
 table.SetAutoFormatHeaders(true)
 table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
 table.SetCenterSeparator("|")
-pName := viper.GetString(config.MachineProfile)
+pName := viper.GetString(config.ProfileName)

 for _, addonName := range addonNames {
 addonBundle := assets.Addons[addonName]

@@ -123,7 +123,7 @@ var printAddonsList = func() {

 var printAddonsJSON = func() {
 addonNames := make([]string, 0, len(assets.Addons))
-pName := viper.GetString(config.MachineProfile)
+pName := viper.GetString(config.ProfileName)
 for addonName := range assets.Addons {
 addonNames = append(addonNames, addonName)
 }

@@ -132,7 +132,7 @@ var settings = []Setting{
 set: SetBool,
 },
 {
-name: config.MachineProfile,
+name: config.ProfileName,
 set: SetString,
 },
 {

@@ -35,7 +35,7 @@ var addonsDisableCmd = &cobra.Command{
 }

 addon := args[0]
-err := addons.Set(addon, "false", viper.GetString(config.MachineProfile))
+err := addons.Set(addon, "false", viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("disable failed", err)
 }

@@ -34,7 +34,7 @@ var addonsEnableCmd = &cobra.Command{
 exit.UsageT("usage: minikube addons enable ADDON_NAME")
 }
 addon := args[0]
-err := addons.Set(addon, "true", viper.GetString(config.MachineProfile))
+err := addons.Set(addon, "true", viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("enable failed", err)
 }

@@ -26,6 +26,7 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
 "k8s.io/minikube/pkg/minikube/assets"
 "k8s.io/minikube/pkg/minikube/config"
 pkg_config "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
 "k8s.io/minikube/pkg/minikube/out"

@@ -67,8 +69,16 @@ var addonsOpenCmd = &cobra.Command{
 }
 defer api.Close()

-profileName := viper.GetString(pkg_config.MachineProfile)
-if !machine.IsHostRunning(api, profileName) {
+profileName := viper.GetString(pkg_config.ProfileName)
+cc, err := config.Load(profileName)
+if err != nil {
+exit.WithError("Error getting cluster", err)
+}
+cp, err := config.PrimaryControlPlane(*cc)
+if err != nil {
+exit.WithError("Error getting control plane", err)
+}
+if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
 os.Exit(1)
 }
 addon, ok := assets.Addons[addonName] // validate addon input

@@ -35,7 +35,7 @@ var ProfileCmd = &cobra.Command{
 Long: "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`",
 Run: func(cmd *cobra.Command, args []string) {
 if len(args) == 0 {
-profile := viper.GetString(pkgConfig.MachineProfile)
+profile := viper.GetString(pkgConfig.ProfileName)
 out.T(out.Empty, profile)
 os.Exit(0)
 }

@@ -69,7 +69,7 @@ var ProfileCmd = &cobra.Command{
 os.Exit(0)
 }

-err := Set(pkgConfig.MachineProfile, profile)
+err := Set(pkgConfig.ProfileName, profile)
 if err != nil {
 exit.WithError("Setting profile failed", err)
 }

@@ -24,6 +24,7 @@ import (
 "strings"

 "k8s.io/minikube/pkg/minikube/config"
+"k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
 "k8s.io/minikube/pkg/minikube/out"

@@ -75,14 +76,13 @@ var printProfilesTable = func() {
 defer api.Close()

 for _, p := range validProfiles {
 p.Status, err = machine.GetHostStatus(api, p.Name)
 if err != nil {
 glog.Warningf("error getting host status for %s: %v", p.Name, err)
 }
 cp, err := config.PrimaryControlPlane(*p.Config)
 if err != nil {
 glog.Errorf("%q has no control plane: %v", p.Name, err)
 // Print the data we know about anyways
 exit.WithError("error getting primary control plane", err)
 }
 p.Status, err = machine.GetHostStatus(api, driver.MachineName(*p.Config, cp))
 if err != nil {
 glog.Warningf("error getting host status for %s: %v", p.Name, err)
 }
 validData = append(validData, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
 }

@@ -117,7 +117,11 @@ var printProfilesJSON = func() {

 validProfiles, invalidProfiles, err := config.ListProfiles()
 for _, v := range validProfiles {
-status, err := machine.GetHostStatus(api, v.Name)
+cp, err := config.PrimaryControlPlane(*v.Config)
+if err != nil {
+exit.WithError("error getting primary control plane", err)
+}
+status, err := machine.GetHostStatus(api, driver.MachineName(*v.Config, cp))
 if err != nil {
 glog.Warningf("error getting host status for %s: %v", v.Name, err)
 }

@ -35,7 +35,9 @@ import (
|
|||
"github.com/spf13/viper"
|
||||
pkgaddons "k8s.io/minikube/pkg/addons"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -57,7 +59,7 @@ var dashboardCmd = &cobra.Command{
|
|||
Short: "Access the kubernetes dashboard running within the minikube cluster",
|
||||
Long: `Access the kubernetes dashboard running within the minikube cluster`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
profileName := viper.GetString(pkg_config.MachineProfile)
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cc, err := pkg_config.Load(profileName)
|
||||
if err != nil && !pkg_config.IsNotExist(err) {
|
||||
exit.WithError("Error loading profile config", err)
|
||||
|
@ -80,7 +82,13 @@ var dashboardCmd = &cobra.Command{
|
|||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
|
||||
if _, err = api.Load(cc.Name); err != nil {
|
||||
cp, err := config.PrimaryControlPlane(*cc)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cc, cp)
|
||||
if _, err = api.Load(machineName); err != nil {
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
exit.WithCodeT(exit.Unavailable, "{{.name}} cluster does not exist", out.V{"name": cc.Name})
|
||||
|
@ -101,7 +109,7 @@ var dashboardCmd = &cobra.Command{
|
|||
exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
|
||||
}
|
||||
|
||||
if !machine.IsHostRunning(api, profileName) {
|
||||
if !machine.IsHostRunning(api, machineName) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -127,7 +135,7 @@ var dashboardCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
out.ErrT(out.Launch, "Launching proxy ...")
|
||||
p, hostPort, err := kubectlProxy(kubectl, cc.Name)
|
||||
p, hostPort, err := kubectlProxy(kubectl, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("kubectl proxy", err)
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ import (
|
|||
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
|
@ -94,7 +94,7 @@ func runDelete(cmd *cobra.Command, args []string) {
|
|||
exit.UsageT("Usage: minikube delete")
|
||||
}
|
||||
|
||||
validProfiles, invalidProfiles, err := pkg_config.ListProfiles()
|
||||
validProfiles, invalidProfiles, err := config.ListProfiles()
|
||||
if err != nil {
|
||||
glog.Warningf("'error loading profiles in minikube home %q: %v", localpath.MiniPath(), err)
|
||||
}
|
||||
|
@ -137,13 +137,13 @@ func runDelete(cmd *cobra.Command, args []string) {
|
|||
exit.UsageT("usage: minikube delete")
|
||||
}
|
||||
|
||||
profileName := viper.GetString(pkg_config.MachineProfile)
|
||||
profile, err := pkg_config.LoadProfile(profileName)
|
||||
profileName := viper.GetString(config.ProfileName)
|
||||
profile, err := config.LoadProfile(profileName)
|
||||
if err != nil {
|
||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName})
|
||||
}
|
||||
|
||||
errs := DeleteProfiles([]*pkg_config.Profile{profile})
|
||||
errs := DeleteProfiles([]*config.Profile{profile})
|
||||
if len(errs) > 0 {
|
||||
HandleDeletionErrors(errs)
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ func purgeMinikubeDirectory() {
|
|||
}
|
||||
|
||||
// DeleteProfiles deletes one or more profiles
|
||||
func DeleteProfiles(profiles []*pkg_config.Profile) []error {
|
||||
func DeleteProfiles(profiles []*config.Profile) []error {
|
||||
var errs []error
|
||||
for _, profile := range profiles {
|
||||
err := deleteProfile(profile)
|
||||
|
@ -185,8 +185,8 @@ func DeleteProfiles(profiles []*pkg_config.Profile) []error {
|
|||
return errs
|
||||
}
|
||||
|
||||
func deleteProfile(profile *pkg_config.Profile) error {
|
||||
viper.Set(pkg_config.MachineProfile, profile.Name)
|
||||
func deleteProfile(profile *config.Profile) error {
|
||||
viper.Set(config.ProfileName, profile.Name)
|
||||
|
||||
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
|
||||
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
|
||||
|
@ -208,14 +208,14 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
|||
return DeletionError{Err: delErr, Errtype: Fatal}
|
||||
}
|
||||
defer api.Close()
|
||||
cc, err := pkg_config.Load(profile.Name)
|
||||
if err != nil && !pkg_config.IsNotExist(err) {
|
||||
cc, err := config.Load(profile.Name)
|
||||
if err != nil && !config.IsNotExist(err) {
|
||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error loading profile config: %v", err))
|
||||
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
||||
}
|
||||
|
||||
if err == nil && driver.BareMetal(cc.Driver) {
|
||||
if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
|
||||
if err := uninstallKubernetes(api, *cc, cc.Nodes[0], viper.GetString(cmdcfg.Bootstrapper)); err != nil {
|
||||
deletionError, ok := err.(DeletionError)
|
||||
if ok {
|
||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("%v", err))
|
||||
|
@ -230,21 +230,26 @@ func deleteProfile(profile *pkg_config.Profile) error {
|
|||
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
|
||||
if err = machine.DeleteHost(api, profile.Name); err != nil {
|
||||
switch errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
glog.Infof("%s cluster does not exist. Proceeding ahead with cleanup.", profile.Name)
|
||||
default:
|
||||
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
|
||||
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
|
||||
if cc != nil {
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
if err = machine.DeleteHost(api, machineName); err != nil {
|
||||
switch errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
|
||||
default:
|
||||
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
|
||||
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In case DeleteHost didn't complete the job.
|
||||
deleteProfileDirectory(profile.Name)
|
||||
|
||||
if err := pkg_config.DeleteProfile(profile.Name); err != nil {
|
||||
if pkg_config.IsNotExist(err) {
|
||||
if err := config.DeleteProfile(profile.Name); err != nil {
|
||||
if config.IsNotExist(err) {
|
||||
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
|
||||
return DeletionError{Err: delErr, Errtype: MissingProfile}
|
||||
}
|
||||
|
@ -264,17 +269,17 @@ func deleteContext(machineName string) error {
|
|||
return DeletionError{Err: fmt.Errorf("update config: %v", err), Errtype: Fatal}
|
||||
}
|
||||
|
||||
if err := cmdcfg.Unset(pkg_config.MachineProfile); err != nil {
|
||||
if err := cmdcfg.Unset(config.ProfileName); err != nil {
|
||||
return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteInvalidProfile(profile *pkg_config.Profile) []error {
|
||||
func deleteInvalidProfile(profile *config.Profile) []error {
|
||||
out.T(out.DeletingHost, "Trying to delete invalid profile {{.profile}}", out.V{"profile": profile.Name})
|
||||
|
||||
var errs []error
|
||||
pathToProfile := pkg_config.ProfileFolderPath(profile.Name, localpath.MiniPath())
|
||||
pathToProfile := config.ProfileFolderPath(profile.Name, localpath.MiniPath())
|
||||
if _, err := os.Stat(pathToProfile); !os.IsNotExist(err) {
|
||||
err := os.RemoveAll(pathToProfile)
|
||||
if err != nil {
|
||||
|
@ -296,14 +301,14 @@ func profileDeletionErr(profileName string, additionalInfo string) error {
|
|||
return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo)
|
||||
}
|
||||
|
||||
func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.KubernetesConfig, bsName string) error {
|
||||
out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": kc.KubernetesVersion, "bootstrapper_name": bsName})
|
||||
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName)
|
||||
func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error {
|
||||
out.T(out.Resetting, "Uninstalling Kubernetes {{.kubernetes_version}} using {{.bootstrapper_name}} ...", out.V{"kubernetes_version": cc.KubernetesConfig.KubernetesVersion, "bootstrapper_name": bsName})
|
||||
clusterBootstrapper, err := cluster.Bootstrapper(api, bsName, cc, n)
|
||||
if err != nil {
|
||||
return DeletionError{Err: fmt.Errorf("unable to get bootstrapper: %v", err), Errtype: Fatal}
|
||||
}
|
||||
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, profile)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(cc, n))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
@ -312,7 +317,7 @@ func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.Kuber
|
|||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: kc.ContainerRuntime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
@ -323,7 +328,7 @@ func uninstallKubernetes(api libmachine.API, profile string, kc pkg_config.Kuber
|
|||
glog.Errorf("unpause failed: %v", err)
|
||||
}
|
||||
|
||||
if err = clusterBootstrapper.DeleteCluster(kc); err != nil {
|
||||
if err = clusterBootstrapper.DeleteCluster(cc.KubernetesConfig); err != nil {
|
||||
return DeletionError{Err: fmt.Errorf("failed to delete cluster: %v", err), Errtype: Fatal}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@@ -141,7 +141,7 @@ func TestDeleteProfile(t *testing.T) {
 t.Errorf("machines mismatch (-want +got):\n%s", diff)
 }

-viper.Set(config.MachineProfile, "")
+viper.Set(config.ProfileName, "")
 })
 }
 }

@@ -211,5 +211,5 @@ func TestDeleteAllProfiles(t *testing.T) {
 t.Errorf("Did not delete all machines, remaining: %v", afterMachines)
 }

-viper.Set(config.MachineProfile, "")
+viper.Set(config.ProfileName, "")
 }

@ -143,79 +143,82 @@ var dockerEnvCmd = &cobra.Command{
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
profile := viper.GetString(config.MachineProfile)
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, cc.Name)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube docker-env' command`)
|
||||
}
|
||||
|
||||
hostSt, err := machine.GetHostStatus(api, cc.Name)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isDockerActive(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
}
|
||||
|
||||
hostIP, err := host.Driver.GetIP()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host IP", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
|
||||
port := constants.DockerDaemonPort
|
||||
if driver.IsKIC(host.DriverName) { // for kic we need to find what port docker/podman chose for us
|
||||
hostIP = oci.DefaultBindIPV4
|
||||
port, err = oci.HostPortBinding(host.DriverName, profile, port)
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": host.DriverName, "error": err})
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube docker-env' command`)
|
||||
}
|
||||
}
|
||||
|
||||
ec := DockerEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
hostIP: hostIP,
|
||||
port: port,
|
||||
certsDir: localpath.MakeMiniPath("certs"),
|
||||
noProxy: noProxy,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
hostSt, err := machine.GetHostStatus(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
}
|
||||
|
||||
if dockerUnset {
|
||||
if err := dockerUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isDockerActive(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := dockerSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
}
|
||||
|
||||
hostIP, err := host.Driver.GetIP()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host IP", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
|
||||
port := constants.DockerDaemonPort
|
||||
if driver.IsKIC(host.DriverName) { // for kic we need to find what port docker/podman chose for us
|
||||
hostIP = oci.DefaultBindIPV4
|
||||
port, err = oci.HostPortBinding(host.DriverName, profile, port)
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": host.DriverName, "error": err})
|
||||
}
|
||||
}
|
||||
|
||||
ec := DockerEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
hostIP: hostIP,
|
||||
port: port,
|
||||
certsDir: localpath.MakeMiniPath("certs"),
|
||||
noProxy: noProxy,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
if dockerUnset {
|
||||
if err := dockerUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := dockerSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -39,23 +40,26 @@ var ipCmd = &cobra.Command{
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
cc, err := config.Load(viper.GetString(config.MachineProfile))
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
host, err := api.Load(cc.Name)
|
||||
if err != nil {
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name})
|
||||
default:
|
||||
exit.WithError("Error getting host", err)
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name})
|
||||
default:
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
}
|
||||
ip, err := host.Driver.GetIP()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting IP", err)
|
||||
}
|
||||
out.Ln(ip)
|
||||
}
|
||||
ip, err := host.Driver.GetIP()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting IP", err)
|
||||
}
|
||||
out.Ln(ip)
|
||||
},
|
||||
}
|
||||
|
|
|
@@ -49,7 +49,7 @@ minikube kubectl -- get pods --namespace kube-system`,
 }
 defer api.Close()

-cc, err := config.Load(viper.GetString(config.MachineProfile))
+cc, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil && !config.IsNotExist(err) {
 out.ErrLn("Error loading profile config: %v", err)
 }

@ -23,9 +23,11 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/logs"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -34,6 +36,7 @@ const (
|
|||
)
|
||||
|
||||
var (
|
||||
nodeName string
|
||||
// followLogs triggers tail -f mode
|
||||
followLogs bool
|
||||
// numberOfLines is how many lines to output, set via -n
|
||||
|
@ -48,18 +51,33 @@ var logsCmd = &cobra.Command{
|
|||
Short: "Gets the logs of the running instance, used for debugging minikube, not user code.",
|
||||
Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cfg, err := config.Load(viper.GetString(config.MachineProfile))
|
||||
cfg, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
|
||||
if nodeName == "" {
|
||||
cp, err := config.PrimaryControlPlane(*cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
nodeName = cp.Name
|
||||
}
|
||||
|
||||
n, _, err := node.Retrieve(cfg, nodeName)
|
||||
if err != nil {
|
||||
exit.WithError("Error retrieving node", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cfg, *n)
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
h, err := api.Load(cfg.Name)
|
||||
h, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
exit.WithError("api load", err)
|
||||
}
|
||||
|
@ -67,7 +85,7 @@ var logsCmd = &cobra.Command{
|
|||
if err != nil {
|
||||
exit.WithError("command runner", err)
|
||||
}
|
||||
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
|
||||
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, *n)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting cluster bootstrapper", err)
|
||||
}
|
||||
|
@ -99,4 +117,5 @@ func init() {
|
|||
logsCmd.Flags().BoolVarP(&followLogs, "follow", "f", false, "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.")
|
||||
logsCmd.Flags().BoolVar(&showProblems, "problems", false, "Show only log entries which point to known problems")
|
||||
logsCmd.Flags().IntVarP(&numberOfLines, "length", "n", 60, "Number of lines back to go within the log")
|
||||
logsCmd.Flags().StringVar(&nodeName, "node", "", "The node to get logs from. Defaults to the primary control plane.")
|
||||
}
|
||||
|
|
|
@@ -104,11 +104,16 @@ var mountCmd = &cobra.Command{
 exit.WithError("Error getting client", err)
 }
 defer api.Close()
-cc, err := config.Load(viper.GetString(config.MachineProfile))
+cc, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("Error getting config", err)
 }
-host, err := api.Load(cc.Name)
+
+cp, err := config.PrimaryControlPlane(*cc)
+if err != nil {
+exit.WithError("Error getting primary cp", err)
+}
+host, err := api.Load(driver.MachineName(*cc, cp))
 if err != nil {
 exit.WithError("Error loading api", err)
 }

@ -17,7 +17,7 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
@ -29,32 +29,31 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
nodeName string
|
||||
cp bool
|
||||
worker bool
|
||||
cp bool
|
||||
worker bool
|
||||
)
|
||||
var nodeAddCmd = &cobra.Command{
|
||||
Use: "add",
|
||||
Short: "Adds a node to the given cluster.",
|
||||
Long: "Adds a node to the given cluster config, and starts it.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
profile := viper.GetString(config.MachineProfile)
|
||||
mc, err := config.Load(profile)
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
name := nodeName
|
||||
if nodeName == "" {
|
||||
name = profile + strconv.Itoa(len(mc.Nodes)+1)
|
||||
}
|
||||
|
||||
//name := profile + strconv.Itoa(len(mc.Nodes)+1)
|
||||
name := fmt.Sprintf("m%d", len(cc.Nodes)+1)
|
||||
|
||||
out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile})
|
||||
|
||||
n, err := node.Add(mc, name, cp, worker, "", profile)
|
||||
n, err := node.Add(cc, name, cp, worker, "", profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error adding node to cluster", err)
|
||||
}
|
||||
|
||||
_, err = node.Start(*mc, *n, false, nil)
|
||||
_, err = node.Start(*cc, *n, false, nil)
|
||||
if err != nil {
|
||||
exit.WithError("Error starting node", err)
|
||||
}
|
||||
|
@ -64,7 +63,6 @@ var nodeAddCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func init() {
|
||||
nodeAddCmd.Flags().StringVar(&nodeName, "name", "", "The name of the node to add.")
|
||||
nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
|
||||
nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
|
||||
//We should figure out which of these flags to actually import
|
||||
|
|
|
@@ -36,7 +36,7 @@ var nodeDeleteCmd = &cobra.Command{
 }
 name := args[0]

-profile := viper.GetString(config.MachineProfile)
+profile := viper.GetString(config.ProfileName)
 out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": profile})

 cc, err := config.Load(profile)

@@ -50,7 +50,7 @@ var nodeStartCmd = &cobra.Command{
 os.Exit(0)
 }

-cc, err := config.Load(viper.GetString(config.MachineProfile))
+cc, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("loading config", err)
 }

@@ -18,8 +18,12 @@ package cmd

 import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
 "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/exit"
 "k8s.io/minikube/pkg/minikube/machine"
 "k8s.io/minikube/pkg/minikube/node"
 "k8s.io/minikube/pkg/minikube/out"
 )

@@ -39,7 +43,19 @@ var nodeStopCmd = &cobra.Command{
 exit.WithError("creating api client", err)
 }

-err = machine.StopHost(api, name)
+cc, err := config.Load(viper.GetString(config.ProfileName))
+if err != nil {
+exit.WithError("getting config", err)
+}
+
+n, _, err := node.Retrieve(cc, name)
+if err != nil {
+exit.WithError("retrieving node", err)
+}
+
+machineName := driver.MachineName(*cc, *n)
+
+err = machine.StopHost(api, machineName)
 if err != nil {
 out.FatalT("Failed to stop node {{.name}}", out.V{"name": name})
 }

@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -45,7 +46,7 @@ var pauseCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func runPause(cmd *cobra.Command, args []string) {
|
||||
cname := viper.GetString(config.MachineProfile)
|
||||
cname := viper.GetString(config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
|
@ -63,37 +64,40 @@ func runPause(cmd *cobra.Command, args []string) {
|
|||
}
|
||||
|
||||
glog.Infof("config: %+v", cc)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, cname)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
||||
r, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
for _, n := range cc.Nodes {
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, n))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
r, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
|
||||
if allNamespaces {
|
||||
namespaces = nil //all
|
||||
} else if len(namespaces) == 0 {
|
||||
exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
|
||||
}
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
||||
ids, err := cluster.Pause(cr, r, namespaces)
|
||||
if err != nil {
|
||||
exit.WithError("Pause", err)
|
||||
}
|
||||
glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
|
||||
if allNamespaces {
|
||||
namespaces = nil //all
|
||||
} else if len(namespaces) == 0 {
|
||||
exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
|
||||
}
|
||||
|
||||
if namespaces == nil {
|
||||
out.T(out.Unpause, "Paused kubelet and {{.count}} containers", out.V{"count": len(ids)})
|
||||
} else {
|
||||
out.T(out.Unpause, "Paused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
|
||||
ids, err := cluster.Pause(cr, r, namespaces)
|
||||
if err != nil {
|
||||
exit.WithError("Pause", err)
|
||||
}
|
||||
|
||||
if namespaces == nil {
|
||||
out.T(out.Unpause, "Paused kubelet and {{.count}} containers", out.V{"count": len(ids)})
|
||||
} else {
|
||||
out.T(out.Unpause, "Paused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -114,66 +114,69 @@ var podmanEnvCmd = &cobra.Command{
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
profile := viper.GetString(config.MachineProfile)
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, cc.Name)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube podman-env' command`)
|
||||
}
|
||||
|
||||
hostSt, err := machine.GetHostStatus(api, cc.Name)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isPodmanAvailable(host)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The podman service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
}
|
||||
|
||||
client, err := createExternalSSHClient(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting ssh client", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
ec := PodmanEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
client: client,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
}
|
||||
|
||||
if podmanUnset {
|
||||
if err := podmanUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube podman-env' command`)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := podmanSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
hostSt, err := machine.GetHostStatus(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isPodmanAvailable(host)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The podman service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
}
|
||||
|
||||
client, err := createExternalSSHClient(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting ssh client", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
ec := PodmanEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
client: client,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
if podmanUnset {
|
||||
if err := podmanUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := podmanSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@@ -156,7 +156,7 @@ func setFlagsUsingViper() {

 func init() {
 translate.DetermineLocale()
-RootCmd.PersistentFlags().StringP(config.MachineProfile, "p", constants.DefaultMachineName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
+RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
 RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the kubernetes cluster.")

 groups := templates.CommandGroups{

@ -36,6 +36,7 @@ import (
|
|||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
|
@ -82,15 +83,19 @@ var serviceCmd = &cobra.Command{
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
profileName := viper.GetString(pkg_config.MachineProfile)
|
||||
if !machine.IsHostRunning(api, profileName) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cfg, err := config.Load(profileName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
cp, err := config.PrimaryControlPlane(*cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting control plane", err)
|
||||
}
|
||||
machineName := driver.MachineName(*cfg, cp)
|
||||
if !machine.IsHostRunning(api, machineName) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker {
|
||||
startKicServiceTunnel(svc, cfg.Name)
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -46,8 +47,16 @@ var serviceListCmd = &cobra.Command{
|
|||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
profileName := viper.GetString(pkg_config.MachineProfile)
|
||||
if !machine.IsHostRunning(api, profileName) {
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cfg, err := config.Load(profileName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
cp, err := config.PrimaryControlPlane(*cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
if !machine.IsHostRunning(api, driver.MachineName(*cfg, cp)) {
|
||||
exit.WithCodeT(exit.Unavailable, "profile {{.name}} is not running.", out.V{"name": profileName})
|
||||
}
|
||||
serviceURLs, err := service.GetServiceURLs(api, serviceListNamespace, serviceURLTemplate)
|
||||
|
@ -56,10 +65,6 @@ var serviceListCmd = &cobra.Command{
|
|||
out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.")
|
||||
os.Exit(exit.Unavailable)
|
||||
}
|
||||
cfg, err := config.Load(viper.GetString(config.MachineProfile))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
|
||||
var data [][]string
|
||||
for _, serviceURL := range serviceURLs {
|
||||
|
|
|
@@ -33,7 +33,7 @@ var sshKeyCmd = &cobra.Command{
 Short: "Retrieve the ssh identity key path of the specified cluster",
 Long: "Retrieve the ssh identity key path of the specified cluster.",
 Run: func(cmd *cobra.Command, args []string) {
-cc, err := config.Load(viper.GetString(config.MachineProfile))
+cc, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("Getting machine config failed", err)
 }

@@ -45,11 +45,16 @@ var sshCmd = &cobra.Command{
 exit.WithError("Error getting client", err)
 }
 defer api.Close()
-cc, err := config.Load(viper.GetString(config.MachineProfile))
+cc, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("Error getting config", err)
 }
-host, err := machine.CheckIfHostExistsAndLoad(api, cc.Name)
+// TODO: allow choice of node to ssh into
+cp, err := config.PrimaryControlPlane(*cc)
+if err != nil {
+exit.WithError("Error getting primary control plane", err)
+}
+host, err := machine.CheckIfHostExistsAndLoad(api, driver.MachineName(*cc, cp))
 if err != nil {
 exit.WithError("Error getting host", err)
 }

@@ -62,7 +67,7 @@ var sshCmd = &cobra.Command{
 ssh.SetDefaultClient(ssh.External)
 }

-err = machine.CreateSSHShell(api, args)
+err = machine.CreateSSHShell(api, *cc, cp, args)
 if err != nil {
 // This is typically due to a non-zero exit code, so no need for flourish.
 out.ErrLn("ssh: %v", err)

@ -287,7 +287,7 @@ func runStart(cmd *cobra.Command, args []string) {
|
|||
registryMirror = viper.GetStringSlice("registry_mirror")
|
||||
}
|
||||
|
||||
existing, err := config.Load(viper.GetString(config.MachineProfile))
|
||||
existing, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil && !config.IsNotExist(err) {
|
||||
exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
|
@ -365,8 +365,8 @@ func cacheISO(cfg *config.ClusterConfig, driverName string) {
|
|||
|
||||
func displayVersion(version string) {
|
||||
prefix := ""
|
||||
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
|
||||
prefix = fmt.Sprintf("[%s] ", viper.GetString(config.MachineProfile))
|
||||
if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
|
||||
prefix = fmt.Sprintf("[%s] ", viper.GetString(config.ProfileName))
|
||||
}
|
||||
|
||||
versionState := out.Happy
|
||||
|
@ -514,7 +514,12 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
|
|||
return
|
||||
}
|
||||
|
||||
machineName := viper.GetString(config.MachineProfile)
|
||||
cp, err := config.PrimaryControlPlane(*existing)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary cp", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*existing, cp)
|
||||
h, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
glog.Warningf("selectDriver api.Load: %v", err)
|
||||
|
@ -593,8 +598,8 @@ func selectImageRepository(mirrorCountry string) (bool, string, error) {
|
|||
|
||||
// Return a minikube command containing the current profile name
|
||||
func minikubeCmd() string {
|
||||
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
|
||||
return fmt.Sprintf("minikube -p %s", config.MachineProfile)
|
||||
if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
|
||||
return fmt.Sprintf("minikube -p %s", config.ProfileName)
|
||||
}
|
||||
return "minikube"
|
||||
}
|
||||
|
@ -624,7 +629,7 @@ func validateUser(drvName string) {
|
|||
if !useForce {
|
||||
os.Exit(exit.Permissions)
|
||||
}
|
||||
_, err = config.Load(viper.GetString(config.MachineProfile))
|
||||
_, err = config.Load(viper.GetString(config.ProfileName))
|
||||
if err == nil || !config.IsNotExist(err) {
|
||||
out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()})
|
||||
}
|
||||
|
@ -687,7 +692,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
|
|||
}
|
||||
|
||||
if driver.BareMetal(drvName) {
|
||||
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
|
||||
if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
|
||||
exit.WithCodeT(exit.Config, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
|
||||
}
|
||||
|
||||
|
@ -775,7 +780,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
|
|||
|
||||
var kubeNodeName string
|
||||
if drvName != driver.None {
|
||||
kubeNodeName = viper.GetString(config.MachineProfile)
|
||||
kubeNodeName = "m01"
|
||||
}
|
||||
|
||||
// Create the initial node, which will necessarily be a control plane
|
||||
|
@ -788,7 +793,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
|
|||
}
|
||||
|
||||
cfg := config.ClusterConfig{
|
||||
Name: viper.GetString(config.MachineProfile),
|
||||
Name: viper.GetString(config.ProfileName),
|
||||
KeepContext: viper.GetBool(keepContext),
|
||||
EmbedCerts: viper.GetBool(embedCerts),
|
||||
MinikubeISO: viper.GetString(isoURL),
|
||||
|
@ -822,7 +827,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
|
|||
NatNicType: viper.GetString(natNicType),
|
||||
KubernetesConfig: config.KubernetesConfig{
|
||||
KubernetesVersion: k8sVersion,
|
||||
ClusterName: viper.GetString(config.MachineProfile),
|
||||
ClusterName: viper.GetString(config.ProfileName),
|
||||
APIServerName: viper.GetString(apiServerName),
|
||||
APIServerNames: apiServerNames,
|
||||
APIServerIPs: apiServerIPs,
|
||||
|
@ -941,7 +946,7 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
|
|||
if nvs.LT(ovs) {
|
||||
nv = version.VersionPrefix + ovs.String()
|
||||
profileArg := ""
|
||||
if old.Name != constants.DefaultMachineName {
|
||||
if old.Name != constants.DefaultClusterName {
|
||||
profileArg = fmt.Sprintf("-p %s", old.Name)
|
||||
}
|
||||
exit.WithCodeT(exit.Config, `Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:
|
||||
|
|
|
@ -34,6 +34,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
|
@ -95,7 +96,17 @@ var statusCmd = &cobra.Command{
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
machineName := viper.GetString(config.MachineProfile)
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("getting config", err)
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(*cc)
|
||||
if err != nil {
|
||||
exit.WithError("getting primary control plane", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cc, cp)
|
||||
st, err := status(api, machineName)
|
||||
if err != nil {
|
||||
glog.Errorf("status error: %v", err)
|
||||
|
|
|
@ -19,12 +19,15 @@ package cmd
|
|||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine"
|
||||
"github.com/docker/machine/libmachine/mcnerror"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
|
@ -43,37 +46,24 @@ itself, leaving all files intact. The cluster can be started again with the "sta
|
|||
|
||||
// runStop handles the executes the flow of "minikube stop"
|
||||
func runStop(cmd *cobra.Command, args []string) {
|
||||
profile := viper.GetString(pkg_config.MachineProfile)
|
||||
profile := viper.GetString(pkg_config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
nonexistent := false
|
||||
stop := func() (err error) {
|
||||
err = machine.StopHost(api, profile)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
glog.Warningf("stop host returned error: %v", err)
|
||||
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": profile})
|
||||
nonexistent = true
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting cluster config", err)
|
||||
}
|
||||
|
||||
if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil {
|
||||
exit.WithError("Unable to stop VM", err)
|
||||
}
|
||||
for _, n := range cc.Nodes {
|
||||
nonexistent := stop(api, *cc, n)
|
||||
|
||||
if !nonexistent {
|
||||
out.T(out.Stopped, `"{{.profile_name}}" stopped.`, out.V{"profile_name": profile})
|
||||
if !nonexistent {
|
||||
out.T(out.Stopped, `"{{.node_name}}" stopped.`, out.V{"node_name": n.Name})
|
||||
}
|
||||
}
|
||||
|
||||
if err := killMountProcess(); err != nil {
|
||||
|
@ -85,3 +75,30 @@ func runStop(cmd *cobra.Command, args []string) {
|
|||
exit.WithError("update config", err)
|
||||
}
|
||||
}
|
||||
|
||||
func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool {
|
||||
nonexistent := false
|
||||
stop := func() (err error) {
|
||||
machineName := driver.MachineName(cluster, n)
|
||||
err = machine.StopHost(api, machineName)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
glog.Warningf("stop host returned error: %v", err)
|
||||
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": cluster})
|
||||
nonexistent = true
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil {
|
||||
exit.WithError("Unable to stop VM", err)
|
||||
}
|
||||
|
||||
return nonexistent
|
||||
}
|
||||
|
|
|
@@ -76,7 +76,7 @@ var tunnelCmd = &cobra.Command{
 exit.WithError("error creating clientset", err)
 }

-cfg, err := config.Load(viper.GetString(config.MachineProfile))
+cfg, err := config.Load(viper.GetString(config.ProfileName))
 if err != nil {
 exit.WithError("Error getting config", err)
 }

@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -37,7 +38,7 @@ var unpauseCmd = &cobra.Command{
|
|||
Use: "unpause",
|
||||
Short: "unpause Kubernetes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cname := viper.GetString(config.MachineProfile)
|
||||
cname := viper.GetString(config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
|
@ -54,39 +55,43 @@ var unpauseCmd = &cobra.Command{
|
|||
os.Exit(1)
|
||||
}
|
||||
glog.Infof("config: %+v", cc)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, cname)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
||||
r, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
||||
glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
|
||||
if allNamespaces {
|
||||
namespaces = nil //all
|
||||
} else {
|
||||
if len(namespaces) == 0 {
|
||||
exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
}
|
||||
|
||||
ids, err := cluster.Unpause(cr, r, namespaces)
|
||||
if err != nil {
|
||||
exit.WithError("Pause", err)
|
||||
}
|
||||
r, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
if namespaces == nil {
|
||||
out.T(out.Pause, "Unpaused kubelet and {{.count}} containers", out.V{"count": len(ids)})
|
||||
} else {
|
||||
out.T(out.Pause, "Unpaused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
||||
glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
|
||||
if allNamespaces {
|
||||
namespaces = nil //all
|
||||
} else {
|
||||
if len(namespaces) == 0 {
|
||||
exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
|
||||
}
|
||||
}
|
||||
|
||||
ids, err := cluster.Unpause(cr, r, namespaces)
|
||||
if err != nil {
|
||||
exit.WithError("Pause", err)
|
||||
}
|
||||
|
||||
if namespaces == nil {
|
||||
out.T(out.Pause, "Unpaused kubelet and {{.count}} containers", out.V{"count": len(ids)})
|
||||
} else {
|
||||
out.T(out.Pause, "Unpaused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
|
||||
}
|
||||
}
|
||||
|
||||
},
|
||||
|
|
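The added lines above fold the unpause logic into a per-node loop. As a minimal sketch of that flow, disentangled from the removed lines and wrapped in a hypothetical helper (the helper name and error wrapping are assumptions, not part of the diff; imports match the unpause import hunk plus libmachine and pkg/errors):

// unpauseNodes is a sketch of the per-node flow added above: resolve each
// node's machine name, load its host, and unpause its container runtime.
func unpauseNodes(api libmachine.API, cc *config.ClusterConfig, namespaces []string) error {
	for _, n := range cc.Nodes {
		name := driver.MachineName(*cc, n)

		host, err := machine.CheckIfHostExistsAndLoad(api, name)
		if err != nil {
			return errors.Wrap(err, "getting host")
		}
		r, err := machine.CommandRunner(host)
		if err != nil {
			return errors.Wrap(err, "command runner")
		}
		cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
		if err != nil {
			return errors.Wrap(err, "runtime")
		}
		ids, err := cluster.Unpause(cr, r, namespaces)
		if err != nil {
			return errors.Wrap(err, "unpause")
		}
		glog.Infof("unpaused %d containers on %s", len(ids), name)
	}
	return nil
}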
|
@ -39,7 +39,7 @@ var updateContextCmd = &cobra.Command{
|
|||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
machineName := viper.GetString(config.MachineProfile)
|
||||
machineName := viper.GetString(config.ProfileName)
|
||||
ip, err := cluster.GetHostDriverIP(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error host driver ip status", err)
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
|
@ -246,7 +247,16 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
|
|||
}
|
||||
defer api.Close()
|
||||
|
||||
if !machine.IsHostRunning(api, profile) {
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting cluster")
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(*cc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting control plane")
|
||||
}
|
||||
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
|
||||
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", profile, name, val)
|
||||
return enableOrDisableAddon(name, val, profile)
|
||||
}
|
||||
|
|
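Since the hunk above repeats a pattern used throughout this change, here is a minimal sketch of it in isolation, under a hypothetical helper name; every call it makes (config.Load, config.PrimaryControlPlane, driver.MachineName, machine.IsHostRunning) appears in the diff:

// controlPlaneRunning reports whether the primary control plane machine of a
// profile is up, resolving the machine name instead of assuming it equals the
// profile name.
func controlPlaneRunning(api libmachine.API, profile string) (bool, error) {
	cc, err := config.Load(profile)
	if err != nil {
		return false, errors.Wrap(err, "getting cluster")
	}
	cp, err := config.PrimaryControlPlane(*cc)
	if err != nil {
		return false, errors.Wrap(err, "getting control plane")
	}
	return machine.IsHostRunning(api, driver.MachineName(*cc, cp)), nil
}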
|
@ -69,6 +69,7 @@ func (d *Driver) Create() error {
|
|||
Name: d.NodeConfig.MachineName,
|
||||
Image: d.NodeConfig.ImageDigest,
|
||||
ClusterLabel: oci.ProfileLabelKey + "=" + d.MachineName,
|
||||
NodeLabel: oci.NodeLabelKey + "=" + d.NodeConfig.MachineName,
|
||||
CPUs: strconv.Itoa(d.NodeConfig.CPU),
|
||||
Memory: strconv.Itoa(d.NodeConfig.Memory) + "mb",
|
||||
Envs: d.NodeConfig.Envs,
|
||||
|
|
|
@ -97,6 +97,8 @@ func CreateContainerNode(p CreateParams) error {
|
|||
"--label", p.ClusterLabel,
|
||||
// label the node with its role (e.g. control-plane)
|
||||
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, p.Role),
|
||||
// label the node with the node ID
|
||||
"--label", p.NodeLabel,
|
||||
}
|
||||
|
||||
if p.OCIBinary == Podman { // enable execing in /var
|
||||
|
@ -109,7 +111,7 @@ func CreateContainerNode(p CreateParams) error {
|
|||
runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", hostVarVolPath))
|
||||
}
|
||||
if p.OCIBinary == Docker {
|
||||
if err := createDockerVolume(p.Name); err != nil {
|
||||
if err := createDockerVolume(p.Name, p.Name); err != nil {
|
||||
return errors.Wrapf(err, "creating volume for %s container", p.Name)
|
||||
}
|
||||
glog.Infof("Successfully created a docker volume %s", p.Name)
|
||||
|
|
|
@ -25,6 +25,8 @@ const (
|
|||
Podman = "podman"
|
||||
// ProfileLabelKey is applied to any container or volume created by a specific minikube profile: name.minikube.sigs.k8s.io=PROFILE_NAME
|
||||
ProfileLabelKey = "name.minikube.sigs.k8s.io"
|
||||
// NodeLabelKey is applied to each volume so it can be referred to by name
|
||||
NodeLabelKey = "mode.minikube.sigs.k8s.io"
|
||||
// nodeRoleLabelKey is used to identify whether the node is a control plane or a worker
|
||||
nodeRoleLabelKey = "role.minikube.sigs.k8s.io"
|
||||
// CreatedByLabelKey is applied to any container/volume that is created by minikube: created_by.minikube.sigs.k8s.io=true
|
||||
|
@ -35,7 +37,8 @@ const (
|
|||
type CreateParams struct {
|
||||
Name string // used for container name and hostname
|
||||
Image string // container image to use to create the node.
|
||||
ClusterLabel string // label the containers we create using minikube so we can clean up
|
||||
ClusterLabel string // label the clusters we create using minikube so we can clean up
|
||||
NodeLabel string // label the nodes so we can clean up by node name
|
||||
Role string // currently only role supported is control-plane
|
||||
Mounts []Mount // volume mounts
|
||||
APIServerPort int // kubernetes api server port
|
||||
|
|
|
@ -119,11 +119,11 @@ func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
|
|||
// createDockerVolume creates a docker volume to be attached to the container with correct labels and prefixes based on profile name
|
||||
// Caution: if the volume already exists, this does NOT return an error and will not apply the minikube labels to it.
|
||||
// TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
|
||||
func createDockerVolume(name string) error {
|
||||
func createDockerVolume(profile string, nodeName string) error {
|
||||
if err := PointToHostDockerDaemon(); err != nil {
|
||||
return errors.Wrap(err, "point host docker daemon")
|
||||
}
|
||||
cmd := exec.Command(Docker, "volume", "create", name, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, name), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
|
||||
cmd := exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return errors.Wrapf(err, "output %s", string(out))
|
||||
}
|
||||
|
|
|
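A minimal sketch of the new call shape from inside the oci package (the profile and node names are invented for illustration): the volume is now named after the node, while the profile label still ties it back to the owning cluster for cleanup.

// createNodeVolume shows the two-argument form: one volume per node, labeled
// with the profile that owns it.
func createNodeVolume() error {
	profile := "multinode-demo"      // assumed profile name
	nodeName := "multinode-demo-m02" // assumed machine name for a second node

	if err := createDockerVolume(profile, nodeName); err != nil {
		return errors.Wrapf(err, "creating volume for %s container", nodeName)
	}
	// All volumes of the profile can later be listed or removed via:
	//   docker volume ls --filter label=name.minikube.sigs.k8s.io=multinode-demo
	return nil
}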
@ -36,7 +36,6 @@ import (
|
|||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/minikube/pkg/drivers/kic"
|
||||
|
@ -65,8 +64,9 @@ type Bootstrapper struct {
|
|||
}
|
||||
|
||||
// NewBootstrapper creates a new kubeadm.Bootstrapper
|
||||
func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) {
|
||||
name := viper.GetString(config.MachineProfile)
|
||||
// TODO(#6891): Remove node as an argument
|
||||
func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Bootstrapper, error) {
|
||||
name := driver.MachineName(cc, n)
|
||||
h, err := api.Load(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting api client")
|
||||
|
@ -75,7 +75,7 @@ func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) {
|
|||
if err != nil {
|
||||
return nil, errors.Wrap(err, "command runner")
|
||||
}
|
||||
return &Bootstrapper{c: runner, contextName: name, k8sClient: nil}, nil
|
||||
return &Bootstrapper{c: runner, contextName: cc.Name, k8sClient: nil}, nil
|
||||
}
|
||||
|
||||
// GetKubeletStatus returns the kubelet status
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
)
|
||||
|
||||
|
@ -42,12 +43,13 @@ func init() {
|
|||
}
|
||||
|
||||
// Bootstrapper returns a new bootstrapper for the cluster
|
||||
func Bootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
|
||||
// TODO(#6891): Remove node as an argument
|
||||
func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.ClusterConfig, n config.Node) (bootstrapper.Bootstrapper, error) {
|
||||
var b bootstrapper.Bootstrapper
|
||||
var err error
|
||||
switch bootstrapperName {
|
||||
case bootstrapper.Kubeadm:
|
||||
b, err = kubeadm.NewBootstrapper(api)
|
||||
b, err = kubeadm.NewBootstrapper(api, cc, n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting a new kubeadm bootstrapper")
|
||||
}
|
||||
|
|
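A minimal caller-side sketch of the widened bootstrapper entry point, assuming a hypothetical helper and the primary control plane as the target node; the signatures come from the hunks above:

// primaryBootstrapper builds a bootstrapper for the cluster's primary control
// plane, threading the cluster config and node through the new signature.
func primaryBootstrapper(api libmachine.API, cc config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
	cp, err := config.PrimaryControlPlane(cc)
	if err != nil {
		return nil, errors.Wrap(err, "getting control plane")
	}
	return cluster.Bootstrapper(api, bootstrapperName, cc, cp)
}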
|
@ -41,8 +41,8 @@ const (
|
|||
WantKubectlDownloadMsg = "WantKubectlDownloadMsg"
|
||||
// WantNoneDriverWarning is the key for WantNoneDriverWarning
|
||||
WantNoneDriverWarning = "WantNoneDriverWarning"
|
||||
// MachineProfile is the key for MachineProfile
|
||||
MachineProfile = "profile"
|
||||
// ProfileName represents the key for the global profile parameter
|
||||
ProfileName = "profile"
|
||||
// ShowDriverDeprecationNotification is the key for ShowDriverDeprecationNotification
|
||||
ShowDriverDeprecationNotification = "ShowDriverDeprecationNotification"
|
||||
// ShowBootstrapperDeprecationNotification is the key for ShowBootstrapperDeprecationNotification
|
||||
|
|
|
@ -33,11 +33,8 @@ const (
|
|||
NewestKubernetesVersion = "v1.17.3"
|
||||
// OldestKubernetesVersion is the oldest Kubernetes version to test against
|
||||
OldestKubernetesVersion = "v1.11.10"
|
||||
// DefaultMachineName is the default name for the VM
|
||||
DefaultMachineName = "minikube"
|
||||
// DefaultNodeName is the default name for the kubeadm node within the VM
|
||||
DefaultNodeName = "minikube"
|
||||
|
||||
// DefaultClusterName is the default name for the k8s cluster
|
||||
DefaultClusterName = "minikube"
|
||||
// DockerDaemonPort is the port Docker daemon listening inside a minikube node (vm or container).
|
||||
DockerDaemonPort = 2376
|
||||
// APIServerPort is the default API server port
|
||||
|
|
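A small sketch tying the renamed keys together (the helper is hypothetical): code that used to read config.MachineProfile and constants.DefaultMachineName now goes through config.ProfileName and constants.DefaultClusterName.

// activeProfile resolves the profile selected via the global profile flag,
// falling back to the default cluster name when nothing is set.
func activeProfile() string {
	if name := viper.GetString(config.ProfileName); name != "" {
		return name
	}
	return constants.DefaultClusterName
}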
|
@ -24,6 +24,7 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/minikube/pkg/drivers/kic"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
)
|
||||
|
||||
|
@ -212,3 +213,12 @@ func SetLibvirtURI(v string) {
|
|||
os.Setenv("LIBVIRT_DEFAULT_URI", v)
|
||||
|
||||
}
|
||||
|
||||
// MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
|
||||
func MachineName(cc config.ClusterConfig, n config.Node) string {
|
||||
// For a single-node cluster, or a control-plane node, fall back to the old naming
|
||||
if len(cc.Nodes) == 1 || n.ControlPlane {
|
||||
return cc.Name
|
||||
}
|
||||
return fmt.Sprintf("%s-%s", cc.Name, n.Name)
|
||||
}
|
||||
|
|
|
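A worked example of the naming rule above, with made-up cluster and node names (fmt is assumed to be imported):

// Single-node clusters and control-plane nodes keep the plain profile name;
// additional nodes get "<profile>-<node>".
func exampleMachineNames() {
	cc := config.ClusterConfig{
		Name:  "p1",
		Nodes: []config.Node{{Name: "m01", ControlPlane: true}, {Name: "m02"}},
	}
	fmt.Println(driver.MachineName(cc, cc.Nodes[0])) // "p1"
	fmt.Println(driver.MachineName(cc, cc.Nodes[1])) // "p1-m02"
}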
@ -37,6 +37,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/image"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
|
@ -168,28 +169,31 @@ func CacheAndLoadImages(images []string) error {
|
|||
}
|
||||
for _, p := range profiles { // loading images to all running profiles
|
||||
pName := p.Name // capture the loop variable
|
||||
status, err := GetHostStatus(api, pName)
|
||||
c, err := config.Load(pName)
|
||||
if err != nil {
|
||||
glog.Warningf("skipping loading cache for profile %s", pName)
|
||||
glog.Errorf("error getting status for %s: %v", pName, err)
|
||||
continue // try next machine
|
||||
return err
|
||||
}
|
||||
if status == state.Running.String() { // the not running hosts will load on next start
|
||||
h, err := api.Load(pName)
|
||||
for _, n := range c.Nodes {
|
||||
m := driver.MachineName(*c, n)
|
||||
status, err := GetHostStatus(api, m)
|
||||
if err != nil {
|
||||
return err
|
||||
glog.Warningf("skipping loading cache for profile %s", pName)
|
||||
glog.Errorf("error getting status for %s: %v", pName, err)
|
||||
continue // try next machine
|
||||
}
|
||||
cr, err := CommandRunner(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := config.Load(pName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = LoadImages(c, cr, images, constants.ImageCacheDir)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
|
||||
if status == state.Running.String() { // the not running hosts will load on next start
|
||||
h, err := api.Load(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cr, err := CommandRunner(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = LoadImages(c, cr, images, constants.ImageCacheDir)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
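The removed and added lines are interleaved in the hunk above; disentangled, the new per-profile body looks roughly like the sketch below. The helper name is invented, and the code assumes it lives in the machine package so GetHostStatus, CommandRunner and LoadImages resolve locally:

// loadCacheForProfile loads cached images onto every running node of one
// profile; unreachable nodes are skipped instead of aborting the whole run.
func loadCacheForProfile(api libmachine.API, pName string, images []string) error {
	c, err := config.Load(pName)
	if err != nil {
		return err
	}
	for _, n := range c.Nodes {
		m := driver.MachineName(*c, n)
		status, err := GetHostStatus(api, m)
		if err != nil {
			glog.Warningf("skipping loading cache for %s: %v", m, err)
			continue // try the next node
		}
		if status != state.Running.String() {
			continue // stopped nodes pick the cache up on next start
		}
		h, err := api.Load(m)
		if err != nil {
			return err
		}
		cr, err := CommandRunner(h)
		if err != nil {
			return err
		}
		if err := LoadImages(c, cr, images, constants.ImageCacheDir); err != nil {
			glog.Warningf("failed to load cached images for %s: %v", m, err)
		}
	}
	return nil
}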
|
@ -41,7 +41,7 @@ type MockDownloader struct{}
|
|||
func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" }
|
||||
func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil }
|
||||
|
||||
func createMockDriverHost(c config.ClusterConfig) (interface{}, error) {
|
||||
func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
@ -61,10 +61,12 @@ func RegisterMockDriver(t *testing.T) {
|
|||
}
|
||||
|
||||
var defaultClusterConfig = config.ClusterConfig{
|
||||
Name: viper.GetString("profile"),
|
||||
Driver: driver.Mock,
|
||||
MinikubeISO: constants.DefaultISOURL,
|
||||
Downloader: MockDownloader{},
|
||||
DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
|
||||
Nodes: []config.Node{config.Node{Name: "minikube"}},
|
||||
}
|
||||
|
||||
func TestCreateHost(t *testing.T) {
|
||||
|
@ -76,7 +78,7 @@ func TestCreateHost(t *testing.T) {
|
|||
t.Fatal("Machine already exists.")
|
||||
}
|
||||
|
||||
_, err := createHost(api, defaultClusterConfig)
|
||||
_, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating host: %v", err)
|
||||
}
|
||||
|
@ -114,7 +116,7 @@ func TestStartHostExists(t *testing.T) {
|
|||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
// Create an initial host.
|
||||
ih, err := createHost(api, defaultClusterConfig)
|
||||
ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating host: %v", err)
|
||||
}
|
||||
|
@ -130,8 +132,11 @@ func TestStartHostExists(t *testing.T) {
|
|||
|
||||
mc := defaultClusterConfig
|
||||
mc.Name = ih.Name
|
||||
|
||||
n := config.Node{Name: ih.Name}
|
||||
|
||||
// This should pass without calling Create because the host exists already.
|
||||
h, err := StartHost(api, mc)
|
||||
h, err := StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
t.Fatalf("Error starting host: %v", err)
|
||||
}
|
||||
|
@ -151,7 +156,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
|
|||
api := tests.NewMockAPI(t)
|
||||
// Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
|
||||
api.NotExistError = true
|
||||
h, err := createHost(api, defaultClusterConfig)
|
||||
h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating host: %v", err)
|
||||
}
|
||||
|
@ -162,8 +167,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
|
|||
mc := defaultClusterConfig
|
||||
mc.Name = h.Name
|
||||
|
||||
n := config.Node{Name: h.Name}
|
||||
|
||||
// This should pass with creating host, while machine does not exist.
|
||||
h, err = StartHost(api, mc)
|
||||
h, err = StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
if err != ErrorMachineNotExist {
|
||||
t.Fatalf("Error starting host: %v", err)
|
||||
|
@ -171,9 +178,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
|
|||
}
|
||||
|
||||
mc.Name = h.Name
|
||||
n.Name = h.Name
|
||||
|
||||
// Second call. This should pass without calling Create because the host exists already.
|
||||
h, err = StartHost(api, mc)
|
||||
h, err = StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
t.Fatalf("Error starting host: %v", err)
|
||||
}
|
||||
|
@ -193,7 +201,7 @@ func TestStartStoppedHost(t *testing.T) {
|
|||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
// Create an initial host.
|
||||
h, err := createHost(api, defaultClusterConfig)
|
||||
h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating host: %v", err)
|
||||
}
|
||||
|
@ -205,7 +213,8 @@ func TestStartStoppedHost(t *testing.T) {
|
|||
provision.SetDetector(md)
|
||||
mc := defaultClusterConfig
|
||||
mc.Name = h.Name
|
||||
h, err = StartHost(api, mc)
|
||||
n := config.Node{Name: h.Name}
|
||||
h, err = StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
t.Fatal("Error starting host.")
|
||||
}
|
||||
|
@ -233,7 +242,7 @@ func TestStartHost(t *testing.T) {
|
|||
md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}}
|
||||
provision.SetDetector(md)
|
||||
|
||||
h, err := StartHost(api, defaultClusterConfig)
|
||||
h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatal("Error starting host.")
|
||||
}
|
||||
|
@ -261,26 +270,26 @@ func TestStartHostConfig(t *testing.T) {
|
|||
md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}}
|
||||
provision.SetDetector(md)
|
||||
|
||||
config := config.ClusterConfig{
|
||||
cfg := config.ClusterConfig{
|
||||
Driver: driver.Mock,
|
||||
DockerEnv: []string{"FOO=BAR"},
|
||||
DockerOpt: []string{"param=value"},
|
||||
Downloader: MockDownloader{},
|
||||
}
|
||||
|
||||
h, err := StartHost(api, config)
|
||||
h, err := StartHost(api, cfg, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Fatal("Error starting host.")
|
||||
}
|
||||
|
||||
for i := range h.HostOptions.EngineOptions.Env {
|
||||
if h.HostOptions.EngineOptions.Env[i] != config.DockerEnv[i] {
|
||||
if h.HostOptions.EngineOptions.Env[i] != cfg.DockerEnv[i] {
|
||||
t.Fatal("Docker env variables were not set!")
|
||||
}
|
||||
}
|
||||
|
||||
for i := range h.HostOptions.EngineOptions.ArbitraryFlags {
|
||||
if h.HostOptions.EngineOptions.ArbitraryFlags[i] != config.DockerOpt[i] {
|
||||
if h.HostOptions.EngineOptions.ArbitraryFlags[i] != cfg.DockerOpt[i] {
|
||||
t.Fatal("Docker flags were not set!")
|
||||
}
|
||||
}
|
||||
|
@ -298,13 +307,16 @@ func TestStopHostError(t *testing.T) {
|
|||
func TestStopHost(t *testing.T) {
|
||||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
h, err := createHost(api, defaultClusterConfig)
|
||||
h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
||||
if err := StopHost(api, viper.GetString("profile")); err != nil {
|
||||
t.Fatal("An error should be thrown when stopping non-existing machine.")
|
||||
cc := defaultClusterConfig
|
||||
cc.Name = viper.GetString("profile")
|
||||
m := driver.MachineName(cc, config.Node{Name: "minikube"})
|
||||
if err := StopHost(api, m); err != nil {
|
||||
t.Fatalf("Unexpected error stopping machine: %v", err)
|
||||
}
|
||||
if s, _ := h.Driver.GetState(); s != state.Stopped {
|
||||
t.Fatalf("Machine not stopped. Currently in state: %s", s)
|
||||
|
@ -314,11 +326,14 @@ func TestStopHost(t *testing.T) {
|
|||
func TestDeleteHost(t *testing.T) {
|
||||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
if _, err := createHost(api, defaultClusterConfig); err != nil {
|
||||
if _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}); err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
||||
if err := DeleteHost(api, viper.GetString("profile")); err != nil {
|
||||
cc := defaultClusterConfig
|
||||
cc.Name = viper.GetString("profile")
|
||||
|
||||
if err := DeleteHost(api, driver.MachineName(cc, config.Node{Name: "minikube"})); err != nil {
|
||||
t.Fatalf("Unexpected error deleting host: %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -326,7 +341,7 @@ func TestDeleteHost(t *testing.T) {
|
|||
func TestDeleteHostErrorDeletingVM(t *testing.T) {
|
||||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
h, err := createHost(api, defaultClusterConfig)
|
||||
h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
@ -334,7 +349,7 @@ func TestDeleteHostErrorDeletingVM(t *testing.T) {
|
|||
d := &tests.MockDriver{RemoveError: true, T: t}
|
||||
h.Driver = d
|
||||
|
||||
if err := DeleteHost(api, viper.GetString("profile")); err == nil {
|
||||
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"})); err == nil {
|
||||
t.Fatal("Expected error deleting host.")
|
||||
}
|
||||
}
|
||||
|
@ -343,11 +358,11 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) {
|
|||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
api.RemoveError = true
|
||||
if _, err := createHost(api, defaultClusterConfig); err != nil {
|
||||
if _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}); err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
||||
if err := DeleteHost(api, viper.GetString("profile")); err == nil {
|
||||
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"})); err == nil {
|
||||
t.Fatal("Expected error deleting host.")
|
||||
}
|
||||
}
|
||||
|
@ -357,12 +372,12 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) {
|
|||
api := tests.NewMockAPI(t)
|
||||
// Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
|
||||
api.NotExistError = true
|
||||
_, err := createHost(api, defaultClusterConfig)
|
||||
_, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"})
|
||||
if err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
||||
if err := DeleteHost(api, viper.GetString("profile")); err == nil {
|
||||
if err := DeleteHost(api, driver.MachineName(defaultClusterConfig, config.Node{Name: "minikube"})); err == nil {
|
||||
t.Fatal("Expected error deleting host.")
|
||||
}
|
||||
}
|
||||
|
@ -371,28 +386,37 @@ func TestGetHostStatus(t *testing.T) {
|
|||
RegisterMockDriver(t)
|
||||
api := tests.NewMockAPI(t)
|
||||
|
||||
checkState := func(expected string) {
|
||||
s, err := GetHostStatus(api, viper.GetString("profile"))
|
||||
cc := defaultClusterConfig
|
||||
cc.Name = viper.GetString("profile")
|
||||
|
||||
m := driver.MachineName(cc, config.Node{Name: "minikube"})
|
||||
|
||||
checkState := func(expected string, machineName string) {
|
||||
s, err := GetHostStatus(api, machineName)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error getting status: %v", err)
|
||||
}
|
||||
if s != expected {
|
||||
t.Fatalf("Expected status: %s, got %s", s, expected)
|
||||
t.Fatalf("Expected status: %s, got: %s", expected, s)
|
||||
}
|
||||
}
|
||||
|
||||
checkState(state.None.String())
|
||||
checkState(state.None.String(), m)
|
||||
|
||||
if _, err := createHost(api, defaultClusterConfig); err != nil {
|
||||
if _, err := createHost(api, cc, config.Node{Name: "minikube"}); err != nil {
|
||||
t.Errorf("createHost failed: %v", err)
|
||||
}
|
||||
|
||||
checkState(state.Running.String())
|
||||
cc.Name = viper.GetString("profile")
|
||||
|
||||
if err := StopHost(api, viper.GetString("profile")); err != nil {
|
||||
m = driver.MachineName(cc, config.Node{Name: "minikube"})
|
||||
|
||||
checkState(state.Running.String(), m)
|
||||
|
||||
if err := StopHost(api, m); err != nil {
|
||||
t.Errorf("StopHost failed: %v", err)
|
||||
}
|
||||
checkState(state.Stopped.String())
|
||||
checkState(state.Stopped.String(), m)
|
||||
}
|
||||
|
||||
func TestCreateSSHShell(t *testing.T) {
|
||||
|
@ -415,8 +439,11 @@ func TestCreateSSHShell(t *testing.T) {
|
|||
}
|
||||
api.Hosts[viper.GetString("profile")] = &host.Host{Driver: d}
|
||||
|
||||
cc := defaultClusterConfig
|
||||
cc.Name = viper.GetString("profile")
|
||||
|
||||
cliArgs := []string{"exit"}
|
||||
if err := CreateSSHShell(api, cliArgs); err != nil {
|
||||
if err := CreateSSHShell(api, cc, config.Node{Name: "minikube"}, cliArgs); err != nil {
|
||||
t.Fatalf("Error running ssh command: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -56,22 +56,22 @@ var (
|
|||
)
|
||||
|
||||
// fixHost fixes up a previously configured VM so that it is ready to run Kubernetes
|
||||
func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
||||
func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
|
||||
out.T(out.Waiting, "Reconfiguring existing host ...")
|
||||
|
||||
start := time.Now()
|
||||
glog.Infof("fixHost starting: %s", mc.Name)
|
||||
glog.Infof("fixHost starting: %s", n.Name)
|
||||
defer func() {
|
||||
glog.Infof("fixHost completed within %s", time.Since(start))
|
||||
}()
|
||||
|
||||
h, err := api.Load(mc.Name)
|
||||
h, err := api.Load(driver.MachineName(cc, n))
|
||||
if err != nil {
|
||||
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
|
||||
}
|
||||
|
||||
// check if need to re-run docker-env
|
||||
maybeWarnAboutEvalEnv(mc.Driver, mc.Name)
|
||||
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
|
||||
|
||||
s, err := h.Driver.GetState()
|
||||
if err != nil || s == state.Stopped || s == state.None {
|
||||
|
@ -88,12 +88,12 @@ func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
|||
}
|
||||
}
|
||||
// remove machine config directory
|
||||
if err := api.Remove(mc.Name); err != nil {
|
||||
if err := api.Remove(cc.Name); err != nil {
|
||||
return nil, errors.Wrap(err, "api remove")
|
||||
}
|
||||
// recreate virtual machine
|
||||
out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": mc.Name})
|
||||
h, err = createHost(api, mc)
|
||||
out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": cc.Name})
|
||||
h, err = createHost(api, cc, n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error recreating VM")
|
||||
}
|
||||
|
@ -106,9 +106,9 @@ func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
|||
}
|
||||
|
||||
if s == state.Running {
|
||||
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" VM ...`, out.V{"driver_name": mc.Driver, "profile_name": mc.Name})
|
||||
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" VM ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name})
|
||||
} else {
|
||||
out.T(out.Restarting, `Starting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": mc.Driver, "profile_name": mc.Name})
|
||||
out.T(out.Restarting, `Starting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name})
|
||||
if err := h.Driver.Start(); err != nil {
|
||||
return h, errors.Wrap(err, "driver start")
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
|||
}
|
||||
}
|
||||
|
||||
e := engineOptions(mc)
|
||||
e := engineOptions(cc)
|
||||
if len(e.Env) > 0 {
|
||||
h.HostOptions.EngineOptions.Env = e.Env
|
||||
glog.Infof("Detecting provisioner ...")
|
||||
|
@ -134,7 +134,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
|||
return h, nil
|
||||
}
|
||||
|
||||
if err := postStartSetup(h, mc); err != nil {
|
||||
if err := postStartSetup(h, cc); err != nil {
|
||||
return h, errors.Wrap(err, "post-start")
|
||||
}
|
||||
|
||||
|
@ -147,7 +147,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, error) {
|
|||
if err := h.ConfigureAuth(); err != nil {
|
||||
return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
|
||||
}
|
||||
return h, ensureSyncedGuestClock(h, mc.Driver)
|
||||
return h, ensureSyncedGuestClock(h, cc.Driver)
|
||||
}
|
||||
|
||||
// maybeWarnAboutEvalEnv will warn the user if they need to re-eval their docker-env or podman-env
|
||||
|
|
|
@ -25,8 +25,8 @@ import (
|
|||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
)
|
||||
|
||||
|
@ -86,7 +86,7 @@ func List(miniHome ...string) (validMachines []*Machine, inValidMachines []*Mach
|
|||
return validMachines, inValidMachines, nil
|
||||
}
|
||||
|
||||
// Load loads a machine or throws an error if the machine could not be loadedG
|
||||
// Load loads a machine or throws an error if the machine could not be loaded.
|
||||
func Load(name string) (*Machine, error) {
|
||||
api, err := NewAPIClient()
|
||||
if err != nil {
|
||||
|
@ -124,8 +124,8 @@ func machineDirs(miniHome ...string) (dirs []string, err error) {
|
|||
}
|
||||
|
||||
// CreateSSHShell creates a new SSH shell / client
|
||||
func CreateSSHShell(api libmachine.API, args []string) error {
|
||||
machineName := viper.GetString(config.MachineProfile)
|
||||
func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string) error {
|
||||
machineName := driver.MachineName(cc, n)
|
||||
host, err := CheckIfHostExistsAndLoad(api, machineName)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "host exists and load")
|
||||
|
|
|
@ -34,7 +34,7 @@ func TestListMachines(t *testing.T) {
|
|||
totalNumberOfMachines = numberOfValidMachines + numberOfInValidMachines
|
||||
)
|
||||
|
||||
viper.Set(config.MachineProfile, "")
|
||||
viper.Set(config.ProfileName, "")
|
||||
|
||||
testMinikubeDir := "./testdata/list-machines/.minikube"
|
||||
miniDir, err := filepath.Abs(testMinikubeDir)
|
||||
|
|
|
@ -62,7 +62,7 @@ var (
|
|||
)
|
||||
|
||||
// StartHost starts a host VM.
|
||||
func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) {
|
||||
func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
|
||||
// Prevent machine-driver boot races, as well as our own certificate race
|
||||
releaser, err := acquireMachinesLock(cfg.Name)
|
||||
if err != nil {
|
||||
|
@ -80,10 +80,10 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error)
|
|||
}
|
||||
if !exists {
|
||||
glog.Infof("Provisioning new machine with config: %+v", cfg)
|
||||
return createHost(api, cfg)
|
||||
return createHost(api, cfg, n)
|
||||
}
|
||||
glog.Infoln("Skipping create...Using existing machine configuration")
|
||||
return fixHost(api, cfg)
|
||||
return fixHost(api, cfg, n)
|
||||
}
|
||||
|
||||
func engineOptions(cfg config.ClusterConfig) *engine.Options {
|
||||
|
@ -97,7 +97,7 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options {
|
|||
return &o
|
||||
}
|
||||
|
||||
func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) {
|
||||
func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
|
||||
glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
|
@ -115,7 +115,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error
|
|||
if def.Empty() {
|
||||
return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver)
|
||||
}
|
||||
dd, err := def.Config(cfg)
|
||||
dd, err := def.Config(cfg, n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "config")
|
||||
}
|
||||
|
|
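A minimal caller-side sketch of the widened machine API (helper name assumed): StartHost, fixHost and createHost all take the target node now, so callers simply thread it through and let the driver layer derive the machine name.

// startNodeHost starts (or reuses) the libmachine host backing one node.
func startNodeHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
	h, err := machine.StartHost(api, cc, n)
	if err != nil {
		return nil, errors.Wrapf(err, "starting host %s", driver.MachineName(cc, n))
	}
	return h, nil
}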
|
@ -86,7 +86,7 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
|
|||
|
||||
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
|
||||
func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper {
|
||||
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
|
||||
bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, node)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get bootstrapper", err)
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.
|
|||
if err != nil {
|
||||
exit.WithError("Failed to get machine client", err)
|
||||
}
|
||||
host, preExists = startHost(m, *cfg)
|
||||
host, preExists = startHost(m, *cfg, *node)
|
||||
runner, err = machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
|
@ -68,13 +68,13 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.
|
|||
}
|
||||
|
||||
// startHost starts a new minikube host using a VM or None
|
||||
func startHost(api libmachine.API, mc config.ClusterConfig) (*host.Host, bool) {
|
||||
func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) {
|
||||
exists, err := api.Exists(mc.Name)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to check if machine exists", err)
|
||||
}
|
||||
|
||||
host, err := machine.StartHost(api, mc)
|
||||
host, err := machine.StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
)
|
||||
|
||||
|
@ -70,28 +71,23 @@ func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool,
|
|||
|
||||
// Delete stops and deletes the given node from the given cluster
|
||||
func Delete(cc config.ClusterConfig, name string) error {
|
||||
_, index, err := Retrieve(&cc, name)
|
||||
n, index, err := Retrieve(&cc, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/*err = Stop(cc, nd)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to stop node %s. Will still try to delete.", name)
|
||||
}*/
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = machine.DeleteHost(api, name)
|
||||
err = machine.DeleteHost(api, driver.MachineName(cc, *n))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cc.Nodes = append(cc.Nodes[:index], cc.Nodes[index+1:]...)
|
||||
return config.SaveProfile(viper.GetString(config.MachineProfile), &cc)
|
||||
return config.SaveProfile(viper.GetString(config.ProfileName), &cc)
|
||||
}
|
||||
|
||||
// Retrieve finds the node by name in the given cluster
|
||||
|
@ -119,5 +115,5 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error {
|
|||
if !update {
|
||||
cfg.Nodes = append(cfg.Nodes, *node)
|
||||
}
|
||||
return config.SaveProfile(viper.GetString(config.MachineProfile), cfg)
|
||||
return config.SaveProfile(viper.GetString(config.ProfileName), cfg)
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons
|
|||
|
||||
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
|
||||
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
|
||||
if err := config.SaveProfile(viper.GetString(config.MachineProfile), &mc); err != nil {
|
||||
if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil {
|
||||
exit.WithError("Failed to save config", err)
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons
|
|||
|
||||
// enable addons, both old and new!
|
||||
if existingAddons != nil {
|
||||
addons.Start(viper.GetString(config.MachineProfile), existingAddons, AddonList)
|
||||
addons.Start(viper.GetString(config.ProfileName), existingAddons, AddonList)
|
||||
}
|
||||
|
||||
if err = CacheAndLoadImagesInConfig(); err != nil {
|
||||
|
|
|
@ -43,17 +43,17 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
return kic.NewDriver(kic.Config{
|
||||
MachineName: mc.Name,
|
||||
MachineName: driver.MachineName(cc, n),
|
||||
StorePath: localpath.MiniPath(),
|
||||
ImageDigest: kic.BaseImage,
|
||||
CPU: mc.CPUs,
|
||||
Memory: mc.Memory,
|
||||
CPU: cc.CPUs,
|
||||
Memory: cc.Memory,
|
||||
OCIBinary: oci.Docker,
|
||||
APIServerPort: mc.Nodes[0].Port,
|
||||
KubernetesVersion: mc.KubernetesConfig.KubernetesVersion,
|
||||
ContainerRuntime: mc.KubernetesConfig.ContainerRuntime,
|
||||
APIServerPort: cc.Nodes[0].Port,
|
||||
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
|
||||
ContainerRuntime: cc.KubernetesConfig.ContainerRuntime,
|
||||
}), nil
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ import (
|
|||
"github.com/pborman/uuid"
|
||||
|
||||
"k8s.io/minikube/pkg/drivers/hyperkit"
|
||||
cfg "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
|
@ -57,28 +57,28 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(config cfg.ClusterConfig) (interface{}, error) {
|
||||
u := config.UUID
|
||||
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
u := cfg.UUID
|
||||
if u == "" {
|
||||
u = uuid.NewUUID().String()
|
||||
}
|
||||
|
||||
return &hyperkit.Driver{
|
||||
BaseDriver: &drivers.BaseDriver{
|
||||
MachineName: config.Name,
|
||||
MachineName: driver.MachineName(cfg, n),
|
||||
StorePath: localpath.MiniPath(),
|
||||
SSHUser: "docker",
|
||||
},
|
||||
Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO),
|
||||
DiskSize: config.DiskSize,
|
||||
Memory: config.Memory,
|
||||
CPU: config.CPUs,
|
||||
NFSShares: config.NFSShare,
|
||||
NFSSharesRoot: config.NFSSharesRoot,
|
||||
Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO),
|
||||
DiskSize: cfg.DiskSize,
|
||||
Memory: cfg.Memory,
|
||||
CPU: cfg.CPUs,
|
||||
NFSShares: cfg.NFSShare,
|
||||
NFSSharesRoot: cfg.NFSSharesRoot,
|
||||
UUID: u,
|
||||
VpnKitSock: config.HyperkitVpnKitSock,
|
||||
VSockPorts: config.HyperkitVSockPorts,
|
||||
Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + config.Name,
|
||||
VpnKitSock: cfg.HyperkitVpnKitSock,
|
||||
VSockPorts: cfg.HyperkitVSockPorts,
|
||||
Cmdline: "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + cfg.Name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
cfg "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
|
@ -52,16 +52,16 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(config cfg.ClusterConfig) (interface{}, error) {
|
||||
d := hyperv.NewDriver(config.Name, localpath.MiniPath())
|
||||
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
|
||||
d.VSwitch = config.HypervVirtualSwitch
|
||||
if d.VSwitch == "" && config.HypervUseExternalSwitch {
|
||||
switchName, adapter, err := chooseSwitch(config.HypervExternalAdapter)
|
||||
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
d := hyperv.NewDriver(driver.MachineName(cfg, n), localpath.MiniPath())
|
||||
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
|
||||
d.VSwitch = cfg.HypervVirtualSwitch
|
||||
if d.VSwitch == "" && cfg.HypervUseExternalSwitch {
|
||||
switchName, adapter, err := chooseSwitch(cfg.HypervExternalAdapter)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to choose switch for Hyper-V driver")
|
||||
}
|
||||
if config.HypervExternalAdapter == "" && switchName == "" {
|
||||
if cfg.HypervExternalAdapter == "" && switchName == "" {
|
||||
// create a switch on the returned adapter
|
||||
switchName = defaultExternalSwitchName
|
||||
err := createVMSwitch(switchName, adapter)
|
||||
|
@ -71,9 +71,9 @@ func configure(config cfg.ClusterConfig) (interface{}, error) {
|
|||
}
|
||||
d.VSwitch = switchName
|
||||
}
|
||||
d.MemSize = config.Memory
|
||||
d.CPU = config.CPUs
|
||||
d.DiskSize = config.DiskSize
|
||||
d.MemSize = cfg.Memory
|
||||
d.CPU = cfg.CPUs
|
||||
d.DiskSize = cfg.DiskSize
|
||||
d.SSHUser = "docker"
|
||||
d.DisableDynamicMemory = true // default to disable dynamic memory as minikube is unlikely to work properly with dynamic memory
|
||||
return d, nil
|
||||
|
|
|
@ -67,25 +67,25 @@ type kvmDriver struct {
|
|||
ConnectionURI string
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
name := mc.Name
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
name := driver.MachineName(cc, n)
|
||||
return kvmDriver{
|
||||
BaseDriver: &drivers.BaseDriver{
|
||||
MachineName: name,
|
||||
StorePath: localpath.MiniPath(),
|
||||
SSHUser: "docker",
|
||||
},
|
||||
Memory: mc.Memory,
|
||||
CPU: mc.CPUs,
|
||||
Network: mc.KVMNetwork,
|
||||
Memory: cc.Memory,
|
||||
CPU: cc.CPUs,
|
||||
Network: cc.KVMNetwork,
|
||||
PrivateNetwork: "minikube-net",
|
||||
Boot2DockerURL: mc.Downloader.GetISOFileURI(mc.MinikubeISO),
|
||||
DiskSize: mc.DiskSize,
|
||||
Boot2DockerURL: cc.Downloader.GetISOFileURI(cc.MinikubeISO),
|
||||
DiskSize: cc.DiskSize,
|
||||
DiskPath: filepath.Join(localpath.MiniPath(), "machines", name, fmt.Sprintf("%s.rawdisk", name)),
|
||||
ISO: filepath.Join(localpath.MiniPath(), "machines", name, "boot2docker.iso"),
|
||||
GPU: mc.KVMGPU,
|
||||
Hidden: mc.KVMHidden,
|
||||
ConnectionURI: mc.KVMQemuURI,
|
||||
GPU: cc.KVMGPU,
|
||||
Hidden: cc.KVMHidden,
|
||||
ConnectionURI: cc.KVMQemuURI,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -42,11 +42,11 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
return none.NewDriver(none.Config{
|
||||
MachineName: mc.Name,
|
||||
MachineName: driver.MachineName(cc, n),
|
||||
StorePath: localpath.MiniPath(),
|
||||
ContainerRuntime: mc.KubernetesConfig.ContainerRuntime,
|
||||
ContainerRuntime: cc.KubernetesConfig.ContainerRuntime,
|
||||
}), nil
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
|
||||
parallels "github.com/Parallels/docker-machine-parallels"
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
cfg "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
|
@ -44,12 +44,12 @@ func init() {
|
|||
|
||||
}
|
||||
|
||||
func configure(config cfg.ClusterConfig) (interface{}, error) {
|
||||
d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver)
|
||||
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
|
||||
d.Memory = config.Memory
|
||||
d.CPU = config.CPUs
|
||||
d.DiskSize = config.DiskSize
|
||||
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
d := parallels.NewDriver(driver.MachineName(cfg, n), localpath.MiniPath()).(*parallels.Driver)
|
||||
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
|
||||
d.Memory = cfg.Memory
|
||||
d.CPU = cfg.CPUs
|
||||
d.DiskSize = cfg.DiskSize
|
||||
return d, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -49,15 +49,15 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
return kic.NewDriver(kic.Config{
|
||||
MachineName: mc.Name,
|
||||
MachineName: driver.MachineName(cc, n),
|
||||
StorePath: localpath.MiniPath(),
|
||||
ImageDigest: strings.Split(kic.BaseImage, "@")[0], // podman does not support docker image references with both a tag and a digest.
|
||||
CPU: mc.CPUs,
|
||||
Memory: mc.Memory,
|
||||
CPU: cc.CPUs,
|
||||
Memory: cc.Memory,
|
||||
OCIBinary: oci.Podman,
|
||||
APIServerPort: mc.Nodes[0].Port,
|
||||
APIServerPort: cc.Nodes[0].Port,
|
||||
}), nil
|
||||
}
|
||||
|
||||
|
|
|
@ -49,19 +49,19 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
d := virtualbox.NewDriver(mc.Name, localpath.MiniPath())
|
||||
d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
|
||||
d.Memory = mc.Memory
|
||||
d.CPU = mc.CPUs
|
||||
d.DiskSize = mc.DiskSize
|
||||
d.HostOnlyCIDR = mc.HostOnlyCIDR
|
||||
d.NoShare = mc.DisableDriverMounts
|
||||
d.NoVTXCheck = mc.NoVTXCheck
|
||||
d.NatNicType = mc.NatNicType
|
||||
d.HostOnlyNicType = mc.HostOnlyNicType
|
||||
d.DNSProxy = mc.DNSProxy
|
||||
d.HostDNSResolver = mc.HostDNSResolver
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
d := virtualbox.NewDriver(driver.MachineName(cc, n), localpath.MiniPath())
|
||||
d.Boot2DockerURL = cc.Downloader.GetISOFileURI(cc.MinikubeISO)
|
||||
d.Memory = cc.Memory
|
||||
d.CPU = cc.CPUs
|
||||
d.DiskSize = cc.DiskSize
|
||||
d.HostOnlyCIDR = cc.HostOnlyCIDR
|
||||
d.NoShare = cc.DisableDriverMounts
|
||||
d.NoVTXCheck = cc.NoVTXCheck
|
||||
d.NatNicType = cc.NatNicType
|
||||
d.HostOnlyNicType = cc.HostOnlyNicType
|
||||
d.DNSProxy = cc.DNSProxy
|
||||
d.HostDNSResolver = cc.HostDNSResolver
|
||||
return d, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -39,12 +39,12 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(mc config.ClusterConfig) (interface{}, error) {
|
||||
d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath())
|
||||
d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
|
||||
d.Memory = mc.Memory
|
||||
d.CPU = mc.CPUs
|
||||
d.DiskSize = mc.DiskSize
|
||||
func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
d := vmwcfg.NewConfig(driver.MachineName(cc, n), localpath.MiniPath())
|
||||
d.Boot2DockerURL = cc.Downloader.GetISOFileURI(cc.MinikubeISO)
|
||||
d.Memory = cc.Memory
|
||||
d.CPU = cc.CPUs
|
||||
d.DiskSize = cc.DiskSize
|
||||
|
||||
// TODO(frapposelli): push these defaults upstream to fixup this driver
|
||||
d.SSHPort = 22
|
||||
|
|
|
@ -26,7 +26,7 @@ import (
|
|||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
cfg "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
|
@ -44,12 +44,12 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func configure(config cfg.ClusterConfig) (interface{}, error) {
|
||||
d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
|
||||
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
|
||||
d.Memory = config.Memory
|
||||
d.CPU = config.CPUs
|
||||
d.DiskSize = config.DiskSize
|
||||
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
|
||||
d := vmwarefusion.NewDriver(driver.MachineName(cfg, n), localpath.MiniPath()).(*vmwarefusion.Driver)
|
||||
d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
|
||||
d.Memory = cfg.Memory
|
||||
d.CPU = cfg.CPUs
|
||||
d.DiskSize = cfg.DiskSize
|
||||
|
||||
// TODO(philips): push these defaults upstream to fixup this driver
|
||||
d.SSHPort = 22
|
||||
|
|
|
@ -60,7 +60,7 @@ type Registry interface {
|
|||
}
|
||||
|
||||
// Configurator emits a struct to be marshalled into JSON for Machine Driver
|
||||
type Configurator func(config.ClusterConfig) (interface{}, error)
|
||||
type Configurator func(config.ClusterConfig, config.Node) (interface{}, error)
|
||||
|
||||
// Loader is a function that loads a byte stream and creates a driver.
|
||||
type Loader func() drivers.Driver
|
||||
|
|
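A minimal compile-time sketch of what the widened Configurator type means for each registered driver (the body is a placeholder, not a real driver):

// Any driver's configure function must now accept the node whose machine is
// being built, typically naming it via driver.MachineName(cc, n).
var _ registry.Configurator = func(cc config.ClusterConfig, n config.Node) (interface{}, error) {
	return nil, nil
}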
|
@ -81,7 +81,7 @@ func (k *K8sClientGetter) GetCoreClient() (typed_core.CoreV1Interface, error) {
|
|||
// GetClientset returns a clientset
|
||||
func (*K8sClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
profile := viper.GetString(config.MachineProfile)
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
configOverrides := &clientcmd.ConfigOverrides{
|
||||
Context: clientcmdapi.Context{
|
||||
Cluster: profile,
|
||||
|
@ -119,7 +119,7 @@ type URLs []SvcURL
|
|||
// GetServiceURLs returns a SvcURL object for every service in a particular namespace.
|
||||
// Accepts a template for formatting
|
||||
func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) (URLs, error) {
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, viper.GetString(config.MachineProfile))
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template)
|
|||
|
||||
// GetServiceURLsForService returns a SvcUrl object for a service in a namespace. Supports optional formatting.
|
||||
func GetServiceURLsForService(api libmachine.API, namespace, service string, t *template.Template) (SvcURL, error) {
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, viper.GetString(config.MachineProfile))
|
||||
host, err := machine.CheckIfHostExistsAndLoad(api, viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
return SvcURL{}, errors.Wrap(err, "Error checking if api exist and loading it")
|
||||
}
|
||||
|
|
|
@ -417,15 +417,15 @@ func TestGetServiceURLs(t *testing.T) {
|
|||
defaultAPI := &tests.MockAPI{
|
||||
FakeStore: tests.FakeStore{
|
||||
Hosts: map[string]*host.Host{
|
||||
constants.DefaultMachineName: {
|
||||
Name: constants.DefaultMachineName,
|
||||
constants.DefaultClusterName: {
|
||||
Name: constants.DefaultClusterName,
|
||||
Driver: &tests.MockDriver{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
defaultTemplate := template.Must(template.New("svc-template").Parse("http://{{.IP}}:{{.Port}}"))
|
||||
viper.Set(config.MachineProfile, constants.DefaultMachineName)
|
||||
viper.Set(config.ProfileName, constants.DefaultClusterName)
|
||||
|
||||
var tests = []struct {
|
||||
description string
|
||||
|
@ -490,15 +490,15 @@ func TestGetServiceURLsForService(t *testing.T) {
|
|||
defaultAPI := &tests.MockAPI{
|
||||
FakeStore: tests.FakeStore{
|
||||
Hosts: map[string]*host.Host{
|
||||
constants.DefaultMachineName: {
|
||||
Name: constants.DefaultMachineName,
|
||||
constants.DefaultClusterName: {
|
||||
Name: constants.DefaultClusterName,
|
||||
Driver: &tests.MockDriver{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
defaultTemplate := template.Must(template.New("svc-template").Parse("http://{{.IP}}:{{.Port}}"))
|
||||
viper.Set(config.MachineProfile, constants.DefaultMachineName)
|
||||
viper.Set(config.ProfileName, constants.DefaultClusterName)
|
||||
|
||||
var tests = []struct {
|
||||
description string
|
||||
|
@ -830,8 +830,8 @@ func TestWaitAndMaybeOpenService(t *testing.T) {
|
|||
defaultAPI := &tests.MockAPI{
|
||||
FakeStore: tests.FakeStore{
|
||||
Hosts: map[string]*host.Host{
|
||||
constants.DefaultMachineName: {
|
||||
Name: constants.DefaultMachineName,
|
||||
constants.DefaultClusterName: {
|
||||
Name: constants.DefaultClusterName,
|
||||
Driver: &tests.MockDriver{},
|
||||
},
|
||||
},
|
||||
|
@ -947,8 +947,8 @@ func TestWaitAndMaybeOpenServiceForNotDefaultNamspace(t *testing.T) {
|
|||
defaultAPI := &tests.MockAPI{
|
||||
FakeStore: tests.FakeStore{
|
||||
Hosts: map[string]*host.Host{
|
||||
constants.DefaultMachineName: {
|
||||
Name: constants.DefaultMachineName,
|
||||
constants.DefaultClusterName: {
|
||||
Name: constants.DefaultClusterName,
|
||||
Driver: &tests.MockDriver{},
|
||||
},
|
||||
},
|
||||
|
|