fix config import nonsense in start.go

pull/6318/head
Sharif Elgamal 2019-12-19 09:45:00 -08:00
parent e46c586764
commit 3f20aa4e82
GPG Key ID: 23CC0225BD9FD702
1 changed file with 54 additions and 55 deletions

@@ -45,14 +45,13 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
cmdconfig "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
@@ -134,7 +133,7 @@ var (
apiServerNames []string
addonList []string
apiServerIPs []net.IP
extraOptions cfg.ExtraOptionSlice
extraOptions config.ExtraOptionSlice
)
func init() {
@@ -288,7 +287,7 @@ func runStart(cmd *cobra.Command, args []string) {
registryMirror = viper.GetStringSlice("registry_mirror")
}
existing, err := cfg.Load(viper.GetString(config.MachineProfile))
existing, err := config.Load(viper.GetString(config.MachineProfile))
if err != nil && !os.IsNotExist(err) {
exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
}
@@ -310,13 +309,13 @@ func runStart(cmd *cobra.Command, args []string) {
}
k8sVersion, isUpgrade := getKubernetesVersion(existing)
config, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
mc, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
if err != nil {
exit.WithError("Failed to generate config", err)
}
if !driver.BareMetal(driverName) {
if err := cluster.CacheISO(config); err != nil {
if err := cluster.CacheISO(mc); err != nil {
exit.WithError("Failed to cache ISO", err)
}
}
@@ -329,34 +328,34 @@ func runStart(cmd *cobra.Command, args []string) {
// Now that the ISO is downloaded, pull images in the background while the VM boots.
var cacheGroup errgroup.Group
beginCacheImages(&cacheGroup, config.KubernetesConfig.ImageRepository, k8sVersion)
beginCacheImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion)
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
if err := saveConfig(&config); err != nil {
if err := saveConfig(&mc); err != nil {
exit.WithError("Failed to save config", err)
}
// exits here in case of --download-only option.
handleDownloadOnly(&cacheGroup, k8sVersion)
mRunner, preExists, machineAPI, host := startMachine(&config, &config.Nodes[0])
mRunner, preExists, machineAPI, host := startMachine(&mc, &mc.Nodes[0])
defer machineAPI.Close()
// configure the runtime (docker, containerd, crio)
cr := configureRuntimes(mRunner, driverName, config.KubernetesConfig)
cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig)
showVersionInfo(k8sVersion, cr)
waitCacheImages(&cacheGroup)
// Must be written before bootstrap, otherwise health checks may flake due to stale IP
kubeconfig, err := setupKubeconfig(host, &config.KubernetesConfig, &config.Nodes[0], config.Name)
kubeconfig, err := setupKubeconfig(host, &mc.KubernetesConfig, &mc.Nodes[0], mc.Name)
if err != nil {
exit.WithError("Failed to setup kubeconfig", err)
}
// setup kubeadm (must come after setupKubeconfig)
bs := setupKubeAdm(machineAPI, mRunner, config)
bs := setupKubeAdm(machineAPI, mRunner, mc)
// pull images or restart cluster
bootstrapCluster(bs, cr, mRunner, config.KubernetesConfig, preExists, isUpgrade)
bootstrapCluster(bs, cr, mRunner, mc.KubernetesConfig, preExists, isUpgrade)
configureMounts()
// enable addons with start command
@@ -373,11 +372,11 @@ func runStart(cmd *cobra.Command, args []string) {
// Skip pre-existing, because we already waited for health
if viper.GetBool(waitUntilHealthy) && !preExists {
if err := bs.WaitForCluster(config.KubernetesConfig, viper.GetDuration(waitTimeout)); err != nil {
if err := bs.WaitForCluster(mc.KubernetesConfig, viper.GetDuration(waitTimeout)); err != nil {
exit.WithError("Wait failed", err)
}
}
if err := showKubectlInfo(kubeconfig, k8sVersion, config.Name); err != nil {
if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
glog.Errorf("kubectl info: %v", err)
}
}
@@ -393,7 +392,7 @@ func updateDriver(driverName string) {
func enableAddons() {
for _, a := range addonList {
err := cmdcfg.Set(a, "true")
err := cmdconfig.Set(a, "true")
if err != nil {
exit.WithError("addon enable failed", err)
}
@@ -402,8 +401,8 @@ func enableAddons() {
func displayVersion(version string) {
prefix := ""
if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
prefix = fmt.Sprintf("[%s] ", viper.GetString(cfg.MachineProfile))
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
prefix = fmt.Sprintf("[%s] ", viper.GetString(config.MachineProfile))
}
versionState := out.Happy
@@ -426,7 +425,7 @@ func displayEnviron(env []string) {
}
}
func setupKubeconfig(h *host.Host, c *cfg.KubernetesConfig, n *cfg.Node, clusterName string) (*kubeconfig.Settings, error) {
func setupKubeconfig(h *host.Host, c *config.KubernetesConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
addr, err := h.Driver.GetURL()
if err != nil {
exit.WithError("Failed to get driver URL", err)
@@ -471,7 +470,7 @@ func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) {
}
func startMachine(config *cfg.MachineConfig, node *cfg.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
func startMachine(config *config.MachineConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
@@ -552,7 +551,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
return nil
}
func selectDriver(existing *cfg.MachineConfig) string {
func selectDriver(existing *config.MachineConfig) string {
name := viper.GetString("vm-driver")
glog.Infof("selectDriver: flag=%q, old=%v", name, existing)
options := driver.Choices()
@@ -583,7 +582,7 @@ func selectDriver(existing *cfg.MachineConfig) string {
}
// validateDriver validates that the selected driver appears sane, exits if not
func validateDriver(name string, existing *cfg.MachineConfig) {
func validateDriver(name string, existing *config.MachineConfig) {
glog.Infof("validating driver %q against %+v", name, existing)
if !driver.Supported(name) {
exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
@@ -620,7 +619,7 @@ func validateDriver(name string, existing *cfg.MachineConfig) {
return
}
machineName := viper.GetString(cfg.MachineProfile)
machineName := viper.GetString(config.MachineProfile)
h, err := api.Load(machineName)
if err != nil {
glog.Warningf("selectDriver api.Load: %v", err)
@@ -699,8 +698,8 @@ func selectImageRepository(mirrorCountry string) (bool, string, error) {
// Return a minikube command containing the current profile name
func minikubeCmd() string {
if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
return fmt.Sprintf("minikube -p %s", cfg.MachineProfile)
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
return fmt.Sprintf("minikube -p %s", config.MachineProfile)
}
return "minikube"
}
@@ -730,7 +729,7 @@ func validateUser(drvName string) {
if !useForce {
os.Exit(exit.Permissions)
}
_, err = cfg.Load(viper.GetString(config.MachineProfile))
_, err = config.Load(viper.GetString(config.MachineProfile))
if err == nil || !os.IsNotExist(err) {
out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()})
}
@@ -757,7 +756,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
var cpuCount int
if driver.BareMetal(drvName) {
if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
exit.WithCodeT(exit.Config, "The 'none' driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
}
@@ -789,8 +788,8 @@ func validateFlags(cmd *cobra.Command, drvName string) {
// check that kubeadm extra args contain only whitelisted parameters
for param := range extraOptions.AsMap().Get(kubeadm.Kubeadm) {
if !cfg.ContainsParam(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmCmdParam], param) &&
!cfg.ContainsParam(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmConfigParam], param) {
if !config.ContainsParam(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmCmdParam], param) &&
!config.ContainsParam(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmConfigParam], param) {
exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
}
}
@@ -818,7 +817,7 @@ func validateRegistryMirror() {
// doCacheBinaries caches Kubernetes binaries in the foreground
func doCacheBinaries(k8sVersion string) error {
return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdconfig.Bootstrapper))
}
// beginCacheImages caches Docker images in the background
@@ -828,7 +827,7 @@ func beginCacheImages(g *errgroup.Group, imageRepository string, k8sVersion stri
}
g.Go(func() error {
return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdconfig.Bootstrapper))
})
}
@@ -842,11 +841,11 @@ func waitCacheImages(g *errgroup.Group) {
}
}
// generateCfgFromFlags generates cfg.Config based on flags and supplied arguments
func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (cfg.MachineConfig, error) {
// generateCfgFromFlags generates config.Config based on flags and supplied arguments
func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) {
r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
if err != nil {
return cfg.MachineConfig{}, err
return config.MachineConfig{}, config.Node{}, err
}
// Pick good default values for --network-plugin and --enable-default-cni based on runtime.
@@ -887,8 +886,15 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
}
cfg := cfg.MachineConfig{
Name: viper.GetString(cfg.MachineProfile),
masterNode := config.Node{
Port: viper.GetInt(apiServerPort),
KubernetesVersion: k8sVersion,
Name: constants.DefaultNodeName,
Type: config.Master,
}
cfg := config.MachineConfig{
Name: viper.GetString(config.MachineProfile),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
MinikubeISO: viper.GetString(isoURL),
@@ -919,7 +925,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
HostDNSResolver: viper.GetBool(hostDNSResolver),
HostOnlyNicType: viper.GetString(hostOnlyNicType),
NatNicType: viper.GetString(natNicType),
KubernetesConfig: cfg.KubernetesConfig{
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
APIServerName: viper.GetString(apiServerName),
APIServerNames: apiServerNames,
@@ -935,16 +941,9 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
ShouldLoadCachedImages: viper.GetBool(cacheImages),
EnableDefaultCNI: selectedEnableDefaultCNI,
},
Nodes: []cfg.Node{
cfg.Node{
Port: viper.GetInt(apiServerPort),
KubernetesVersion: k8sVersion,
Name: constants.DefaultNodeName,
Type: cfg.Master,
},
},
Nodes: []config.Node{masterNode},
}
return cfg, nil
return cfg, masterNode, nil
}
// setDockerProxy sets the proxy environment variables in the docker environment.
@@ -981,7 +980,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) error {
// prepareNone prepares the user and host for the joy of the "none" driver
func prepareNone() {
out.T(out.StartingNone, "Configuring local host environment ...")
if viper.GetBool(cfg.WantNoneDriverWarning) {
if viper.GetBool(config.WantNoneDriverWarning) {
out.T(out.Empty, "")
out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.")
out.WarningT("For more information, see:")
@@ -1008,7 +1007,7 @@ func prepareNone() {
}
// startHost starts a new minikube host using a VM or None
func startHost(api libmachine.API, mc cfg.MachineConfig) (*host.Host, bool) {
func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) {
exists, err := api.Exists(mc.Name)
if err != nil {
exit.WithError("Failed to check if machine exists", err)
@@ -1126,7 +1125,7 @@ func tryRegistry(r command.Runner) {
}
// getKubernetesVersion ensures that the requested version is reasonable
func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
func getKubernetesVersion(old *config.MachineConfig) (string, bool) {
paramVersion := viper.GetString(kubernetesVersion)
isUpgrade := false
@@ -1195,8 +1194,8 @@ func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
}
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, cr command.Runner, config cfg.MachineConfig, node cfg.Node) bootstrapper.Bootstrapper {
bs, err := getClusterBootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
func setupKubeAdm(mAPI libmachine.API, cr command.Runner, config config.MachineConfig, node config.Node) bootstrapper.Bootstrapper {
bs, err := getClusterBootstrapper(mAPI, viper.GetString(cmdconfig.Bootstrapper))
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}
@@ -1214,7 +1213,7 @@ func setupKubeAdm(mAPI libmachine.API, cr command.Runner, config cfg.MachineConf
}
// configureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.KubernetesConfig) cruntime.Manager {
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
cr, err := cruntime.New(config)
if err != nil {
@@ -1234,7 +1233,7 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.Ku
}
// bootstrapCluster starts Kubernetes using the chosen bootstrapper
func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, kc cfg.KubernetesConfig, preexisting bool, isUpgrade bool) {
func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, kc config.KubernetesConfig, preexisting bool, isUpgrade bool) {
if isUpgrade || !preexisting {
out.T(out.Pulling, "Pulling images ...")
if err := bs.PullImages(kc); err != nil {
@@ -1275,6 +1274,6 @@ func configureMounts() {
}
// saveConfig saves profile cluster configuration in $MINIKUBE_HOME/profiles/<profilename>/config.json
func saveConfig(clusterCfg *cfg.MachineConfig) error {
return cfg.SaveProfile(viper.GetString(cfg.MachineProfile), clusterCfg)
func saveConfig(clusterCfg *config.MachineConfig) error {
return config.SaveProfile(viper.GetString(config.MachineProfile), clusterCfg)
}
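For reference, a minimal sketch (not part of the commit; the exampleUsage helper is hypothetical) of the import layout this change settles on: the cmd-level config package keeps the cmdconfig alias, while the core config package is imported once, unaliased, so call sites no longer juggle cfg and config for the same import path.

package cmd

import (
	"fmt"

	"github.com/spf13/viper"

	cmdconfig "k8s.io/minikube/cmd/minikube/cmd/config"
	"k8s.io/minikube/pkg/minikube/config"
)

// exampleUsage references both packages through their single, unambiguous
// names, mirroring how the diff above rewrites start.go.
func exampleUsage() {
	profile := viper.GetString(config.MachineProfile)       // constant from pkg/minikube/config
	bootstrapper := viper.GetString(cmdconfig.Bootstrapper) // constant from cmd/minikube/cmd/config
	fmt.Printf("profile=%s bootstrapper=%s\n", profile, bootstrapper)
}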