commit 13ca283b10
@@ -80,7 +80,11 @@ var printProfilesTable = func() {
 			if err != nil {
 				glog.Infof("error getting host status for %v", err)
 			}
-			validData = append(validData, []string{p.Name, p.Config[0].VMDriver, p.Config[0].KubernetesConfig.NodeIP, strconv.Itoa(p.Config[0].KubernetesConfig.NodePort), p.Config[0].KubernetesConfig.KubernetesVersion, p.Status})
+			cp, err := config.PrimaryControlPlane(*p.Config)
+			if err != nil {
+				exit.WithError("profile has no control plane", err)
+			}
+			validData = append(validData, []string{p.Name, p.Config.VMDriver, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
 		}

 		table.AppendBulk(validData)

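Note: the profile table row is now built from the primary control plane's Node entry instead of the removed KubernetesConfig.NodeIP/NodePort fields. A minimal sketch of the lookup pattern, using stand-in types (the real PrimaryControlPlane helper is added to pkg/minikube/config/profile.go later in this diff):

```go
package main

import (
	"errors"
	"fmt"
)

type Node struct {
	Name         string
	IP           string
	Port         int
	ControlPlane bool
}

type MachineConfig struct {
	Name  string
	Nodes []Node
}

// PrimaryControlPlane returns the first node marked as a control plane,
// mirroring the helper this commit introduces.
func PrimaryControlPlane(cc MachineConfig) (Node, error) {
	for _, n := range cc.Nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, errors.New("could not find master node")
}

func main() {
	cc := MachineConfig{
		Name:  "minikube",
		Nodes: []Node{{Name: "m01", IP: "192.168.99.100", Port: 8443, ControlPlane: true}},
	}
	cp, err := PrimaryControlPlane(cc)
	if err != nil {
		panic(err)
	}
	// The profile list prints cp.IP and cp.Port rather than config-level fields.
	fmt.Printf("%s -> %s:%d\n", cc.Name, cp.IP, cp.Port)
}
```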
@@ -91,10 +91,12 @@ var dashboardCmd = &cobra.Command{
 			}
 		}

-		err = proxy.ExcludeIP(cc.KubernetesConfig.NodeIP) // to be used for http get calls
+		for _, n := range cc.Nodes {
+			err = proxy.ExcludeIP(n.IP) // to be used for http get calls
 			if err != nil {
 				glog.Errorf("Error excluding IP from proxy: %s", err)
 			}
+		}

 		kubectl, err := exec.LookPath("kubectl")
 		if err != nil {

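Note: with multiple nodes, every node IP has to be excluded from the proxy, not just a single NodeIP. A standalone sketch of the same loop-and-log pattern (excludeIP is a stand-in for minikube's proxy.ExcludeIP, not its real implementation):

```go
package main

import (
	"fmt"
	"log"
)

type node struct{ IP string }

// excludeIP stands in for proxy.ExcludeIP: add an IP to NO_PROXY handling,
// failing on malformed input.
func excludeIP(ip string) error {
	if ip == "" {
		return fmt.Errorf("empty IP")
	}
	fmt.Println("excluding", ip)
	return nil
}

func main() {
	nodes := []node{{IP: "192.168.99.100"}, {IP: "192.168.99.101"}}
	for _, n := range nodes {
		// Matching the diff: a failure for one node is logged, not fatal,
		// so the dashboard can still be opened.
		if err := excludeIP(n.IP); err != nil {
			log.Printf("Error excluding IP from proxy: %s", err)
		}
	}
}
```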
@@ -73,7 +73,7 @@ func runPause(cmd *cobra.Command, args []string) {
 		exit.WithError("Failed to get command runner", err)
 	}

-	cr, err := cruntime.New(cruntime.Config{Type: cc.ContainerRuntime, Runner: r})
+	cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
 	if err != nil {
 		exit.WithError("Failed runtime", err)
 	}

@@ -53,7 +53,6 @@ import (
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
-	cfg "k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/cruntime"
 	"k8s.io/minikube/pkg/minikube/driver"
@@ -137,7 +136,7 @@ var (
 	apiServerNames []string
 	addonList      []string
 	apiServerIPs   []net.IP
-	extraOptions   cfg.ExtraOptionSlice
+	extraOptions   config.ExtraOptionSlice
 )

 func init() {
@@ -296,7 +295,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		registryMirror = viper.GetStringSlice("registry_mirror")
 	}

-	existing, err := cfg.Load(viper.GetString(config.MachineProfile))
+	existing, err := config.Load(viper.GetString(config.MachineProfile))
 	if err != nil && !os.IsNotExist(err) {
 		exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
 	}
@@ -318,7 +317,7 @@ func runStart(cmd *cobra.Command, args []string) {
 	}

 	k8sVersion, isUpgrade := getKubernetesVersion(existing)
-	config, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
+	mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
 	if err != nil {
 		exit.WithError("Failed to generate config", err)
 	}
@@ -329,7 +328,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		return
 	}

-	cacheISO(&config, driverName)
+	cacheISO(&mc, driverName)

 	if viper.GetBool(nativeSSH) {
 		ssh.SetDefaultClient(ssh.Native)
@@ -339,34 +338,34 @@ func runStart(cmd *cobra.Command, args []string) {

 	// Now that the ISO is downloaded, pull images in the background while the VM boots.
 	var cacheGroup errgroup.Group
-	beginCacheRequiredImages(&cacheGroup, config.KubernetesConfig.ImageRepository, k8sVersion)
+	beginCacheRequiredImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion)

 	// Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot.
 	// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
-	if err := saveConfig(&config); err != nil {
+	if err := saveConfig(&mc); err != nil {
 		exit.WithError("Failed to save config", err)
 	}

 	// exits here in case of --download-only option.
 	handleDownloadOnly(&cacheGroup, k8sVersion)
-	mRunner, preExists, machineAPI, host := startMachine(&config)
+	mRunner, preExists, machineAPI, host := startMachine(&mc, &n)
 	defer machineAPI.Close()
 	// configure the runtime (docker, containerd, crio)
-	cr := configureRuntimes(mRunner, driverName, config.KubernetesConfig)
+	cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig)
 	showVersionInfo(k8sVersion, cr)
 	waitCacheRequiredImages(&cacheGroup)

 	// Must be written before bootstrap, otherwise health checks may flake due to stale IP
-	kubeconfig, err := setupKubeconfig(host, &config, config.Name)
+	kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name)
 	if err != nil {
 		exit.WithError("Failed to setup kubeconfig", err)
 	}

 	// setup kubeadm (must come after setupKubeconfig)
-	bs := setupKubeAdm(machineAPI, config)
+	bs := setupKubeAdm(machineAPI, mc, n)

 	// pull images or restart cluster
-	bootstrapCluster(bs, cr, mRunner, config, preExists, isUpgrade)
+	bootstrapCluster(bs, cr, mRunner, mc, preExists, isUpgrade)
 	configureMounts()

 	// enable addons with start command
@@ -383,11 +382,11 @@ func runStart(cmd *cobra.Command, args []string) {

 	// Skip pre-existing, because we already waited for health
 	if viper.GetBool(waitUntilHealthy) && !preExists {
-		if err := bs.WaitForCluster(config, viper.GetDuration(waitTimeout)); err != nil {
+		if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil {
 			exit.WithError("Wait failed", err)
 		}
 	}
-	if err := showKubectlInfo(kubeconfig, k8sVersion, config.Name); err != nil {
+	if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
 		glog.Errorf("kubectl info: %v", err)
 	}
 }
@@ -401,9 +400,9 @@ func updateDriver(driverName string) {
 	}
 }

-func cacheISO(config *cfg.MachineConfig, driverName string) {
+func cacheISO(cfg *config.MachineConfig, driverName string) {
 	if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) {
-		if err := cluster.CacheISO(*config); err != nil {
+		if err := cluster.CacheISO(*cfg); err != nil {
 			exit.WithError("Failed to cache ISO", err)
 		}
 	}
@@ -420,8 +419,8 @@ func enableAddons() {

 func displayVersion(version string) {
 	prefix := ""
-	if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
-		prefix = fmt.Sprintf("[%s] ", viper.GetString(cfg.MachineProfile))
+	if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
+		prefix = fmt.Sprintf("[%s] ", viper.GetString(config.MachineProfile))
 	}

 	versionState := out.Happy
@@ -444,18 +443,18 @@ func displayEnviron(env []string) {
 	}
 }

-func setupKubeconfig(h *host.Host, c *cfg.MachineConfig, clusterName string) (*kubeconfig.Settings, error) {
+func setupKubeconfig(h *host.Host, c *config.MachineConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) {
 	addr, err := h.Driver.GetURL()
 	if err != nil {
 		exit.WithError("Failed to get driver URL", err)
 	}
 	if !driver.IsKIC(h.DriverName) {
 		addr = strings.Replace(addr, "tcp://", "https://", -1)
-		addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(c.KubernetesConfig.NodePort), -1)
+		addr = strings.Replace(addr, ":2376", ":"+strconv.Itoa(n.Port), -1)
 	}

 	if c.KubernetesConfig.APIServerName != constants.APIServerName {
-		addr = strings.Replace(addr, c.KubernetesConfig.NodeIP, c.KubernetesConfig.APIServerName, -1)
+		addr = strings.Replace(addr, n.IP, c.KubernetesConfig.APIServerName, -1)
 	}
 	kcs := &kubeconfig.Settings{
 		ClusterName: clusterName,
@@ -491,12 +490,12 @@ func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) {

 }

-func startMachine(config *cfg.MachineConfig) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
+func startMachine(cfg *config.MachineConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
 	m, err := machine.NewAPIClient()
 	if err != nil {
 		exit.WithError("Failed to get machine client", err)
 	}
-	host, preExists = startHost(m, *config)
+	host, preExists = startHost(m, *cfg)
 	runner, err = machine.CommandRunner(host)
 	if err != nil {
 		exit.WithError("Failed to get command runner", err)
@@ -510,8 +509,9 @@ func startMachine(config *cfg.MachineConfig) (runner command.Runner, preExists b
 		out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
 	}
 	// Save IP to configuration file for subsequent use
-	config.KubernetesConfig.NodeIP = ip
-	if err := saveConfig(config); err != nil {
+	node.IP = ip
+
+	if err := saveNodeToConfig(cfg, node); err != nil {
 		exit.WithError("Failed to save config", err)
 	}

@@ -573,7 +573,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
 	return nil
 }

-func selectDriver(existing *cfg.MachineConfig) string {
+func selectDriver(existing *config.MachineConfig) string {
 	name := viper.GetString("vm-driver")
 	glog.Infof("selectDriver: flag=%q, old=%v", name, existing)

@@ -613,7 +613,7 @@ func selectDriver(existing *cfg.MachineConfig) string {
 }

 // validateDriver validates that the selected driver appears sane, exits if not
-func validateDriver(name string, existing *cfg.MachineConfig) {
+func validateDriver(name string, existing *config.MachineConfig) {
 	glog.Infof("validating driver %q against %+v", name, existing)
 	if !driver.Supported(name) {
 		exit.WithCodeT(exit.Unavailable, "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
@@ -650,7 +650,7 @@ func validateDriver(name string, existing *cfg.MachineConfig) {
 		return
 	}

-	machineName := viper.GetString(cfg.MachineProfile)
+	machineName := viper.GetString(config.MachineProfile)
 	h, err := api.Load(machineName)
 	if err != nil {
 		glog.Warningf("selectDriver api.Load: %v", err)
@@ -729,8 +729,8 @@ func selectImageRepository(mirrorCountry string) (bool, string, error) {

 // Return a minikube command containing the current profile name
 func minikubeCmd() string {
-	if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
-		return fmt.Sprintf("minikube -p %s", cfg.MachineProfile)
+	if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
+		return fmt.Sprintf("minikube -p %s", config.MachineProfile)
 	}
 	return "minikube"
 }
@@ -760,7 +760,7 @@ func validateUser(drvName string) {
 		if !useForce {
 			os.Exit(exit.Permissions)
 		}
-	_, err = cfg.Load(viper.GetString(config.MachineProfile))
+	_, err = config.Load(viper.GetString(config.MachineProfile))
 	if err == nil || !os.IsNotExist(err) {
 		out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()})
 	}
@@ -814,7 +814,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 	validateMemorySize()

 	if driver.BareMetal(drvName) {
-		if viper.GetString(cfg.MachineProfile) != constants.DefaultMachineName {
+		if viper.GetString(config.MachineProfile) != constants.DefaultMachineName {
 			exit.WithCodeT(exit.Config, "The 'none' driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
 		}

@@ -835,8 +835,8 @@ func validateFlags(cmd *cobra.Command, drvName string) {

 	// check that kubeadm extra args contain only whitelisted parameters
 	for param := range extraOptions.AsMap().Get(bsutil.Kubeadm) {
-		if !cfg.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
-			!cfg.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
+		if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
+			!config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
 			exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
 		}
 	}
@@ -888,11 +888,11 @@ func waitCacheRequiredImages(g *errgroup.Group) {
 	}
 }

-// generateCfgFromFlags generates cfg.Config based on flags and supplied arguments
-func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (cfg.MachineConfig, error) {
+// generateCfgFromFlags generates config.Config based on flags and supplied arguments
+func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) {
 	r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
 	if err != nil {
-		return cfg.MachineConfig{}, err
+		return config.MachineConfig{}, config.Node{}, err
 	}

 	// Pick good default values for --network-plugin and --enable-default-cni based on runtime.
@@ -933,8 +933,17 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
 	}

-	cfg := cfg.MachineConfig{
-		Name: viper.GetString(cfg.MachineProfile),
+	// Create the initial node, which will necessarily be a control plane
+	cp := config.Node{
+		Port:              viper.GetInt(apiServerPort),
+		KubernetesVersion: k8sVersion,
+		Name:              constants.DefaultNodeName,
+		ControlPlane:      true,
+		Worker:            true,
+	}
+
+	cfg := config.MachineConfig{
+		Name:        viper.GetString(config.MachineProfile),
 		KeepContext: viper.GetBool(keepContext),
 		EmbedCerts:  viper.GetBool(embedCerts),
 		MinikubeISO: viper.GetString(isoURL),
@@ -942,7 +951,6 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		CPUs:               viper.GetInt(cpus),
 		DiskSize:           pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
 		VMDriver:           drvName,
-		ContainerRuntime:   viper.GetString(containerRuntime),
 		HyperkitVpnKitSock: viper.GetString(vpnkitSock),
 		HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
 		NFSShare:           viper.GetStringSlice(nfsShare),
@@ -965,10 +973,8 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		HostDNSResolver: viper.GetBool(hostDNSResolver),
 		HostOnlyNicType: viper.GetString(hostOnlyNicType),
 		NatNicType:      viper.GetString(natNicType),
-		KubernetesConfig: cfg.KubernetesConfig{
+		KubernetesConfig: config.KubernetesConfig{
 			KubernetesVersion: k8sVersion,
-			NodePort:          viper.GetInt(apiServerPort),
-			NodeName:          constants.DefaultNodeName,
 			APIServerName:     viper.GetString(apiServerName),
 			APIServerNames:    apiServerNames,
 			APIServerIPs:      apiServerIPs,
@@ -983,8 +989,9 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 			ShouldLoadCachedImages: viper.GetBool(cacheImages),
 			EnableDefaultCNI:       selectedEnableDefaultCNI,
 		},
+		Nodes: []config.Node{cp},
 	}
-	return cfg, nil
+	return cfg, cp, nil
 }

 // setDockerProxy sets the proxy environment variables in the docker environment.
@@ -1040,7 +1047,7 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
 // prepareNone prepares the user and host for the joy of the "none" driver
 func prepareNone() {
 	out.T(out.StartingNone, "Configuring local host environment ...")
-	if viper.GetBool(cfg.WantNoneDriverWarning) {
+	if viper.GetBool(config.WantNoneDriverWarning) {
 		out.T(out.Empty, "")
 		out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.")
 		out.WarningT("For more information, see:")
@@ -1067,7 +1074,7 @@ func prepareNone() {
 }

 // startHost starts a new minikube host using a VM or None
-func startHost(api libmachine.API, mc cfg.MachineConfig) (*host.Host, bool) {
+func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) {
 	exists, err := api.Exists(mc.Name)
 	if err != nil {
 		exit.WithError("Failed to check if machine exists", err)
@@ -1174,7 +1181,7 @@ func tryRegistry(r command.Runner) {
 }

 // getKubernetesVersion ensures that the requested version is reasonable
-func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
+func getKubernetesVersion(old *config.MachineConfig) (string, bool) {
 	paramVersion := viper.GetString(kubernetesVersion)
 	isUpgrade := false

@@ -1244,7 +1251,7 @@ func getKubernetesVersion(old *cfg.MachineConfig) (string, bool) {
 }

 // setupKubeAdm adds any requested files into the VM before Kubernetes is started
-func setupKubeAdm(mAPI libmachine.API, config cfg.MachineConfig) bootstrapper.Bootstrapper {
+func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Node) bootstrapper.Bootstrapper {
 	bs, err := getClusterBootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
 	if err != nil {
 		exit.WithError("Failed to get bootstrapper", err)
@@ -1253,17 +1260,17 @@ func setupKubeAdm(mAPI libmachine.API, config cfg.MachineConfig) bootstrapper.Bo
 		out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value})
 	}
 	// Loads cached images, generates config files, download binaries
-	if err := bs.UpdateCluster(config); err != nil {
+	if err := bs.UpdateCluster(cfg); err != nil {
 		exit.WithError("Failed to update cluster", err)
 	}
-	if err := bs.SetupCerts(config.KubernetesConfig); err != nil {
+	if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil {
 		exit.WithError("Failed to setup certs", err)
 	}
 	return bs
 }

 // configureRuntimes does what needs to happen to get a runtime going.
-func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.KubernetesConfig) cruntime.Manager {
+func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
 	config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
 	cr, err := cruntime.New(config)
 	if err != nil {
@@ -1283,16 +1290,16 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s cfg.Ku
 }

 // bootstrapCluster starts Kubernetes using the chosen bootstrapper
-func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, c cfg.MachineConfig, preexisting bool, isUpgrade bool) {
+func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, mc config.MachineConfig, preexisting bool, isUpgrade bool) {
 	if isUpgrade || !preexisting {
 		out.T(out.Pulling, "Pulling images ...")
-		if err := bs.PullImages(c.KubernetesConfig); err != nil {
+		if err := bs.PullImages(mc.KubernetesConfig); err != nil {
 			out.T(out.FailureType, "Unable to pull images, which may be OK: {{.error}}", out.V{"error": err})
 		}
 	}

 	out.T(out.Launch, "Launching Kubernetes ... ")
-	if err := bs.StartCluster(c); err != nil {
+	if err := bs.StartCluster(mc); err != nil {
 		exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(r, bs, runner))
 	}
 }
@@ -1324,6 +1331,16 @@ func configureMounts() {
 }

 // saveConfig saves profile cluster configuration in $MINIKUBE_HOME/profiles/<profilename>/config.json
-func saveConfig(clusterCfg *cfg.MachineConfig) error {
-	return cfg.CreateProfile(viper.GetString(cfg.MachineProfile), clusterCfg)
+func saveConfig(clusterCfg *config.MachineConfig) error {
+	return config.SaveProfile(viper.GetString(config.MachineProfile), clusterCfg)
 }
+
+func saveNodeToConfig(cfg *config.MachineConfig, node *config.Node) error {
+	for i, n := range cfg.Nodes {
+		if n.Name == node.Name {
+			cfg.Nodes[i] = *node
+			break
+		}
+	}
+	return saveConfig(cfg)
+}

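Note: generateCfgFromFlags now hands back the machine config plus the freshly created control-plane Node, and startMachine writes the discovered IP back through saveNodeToConfig rather than mutating KubernetesConfig.NodeIP. A minimal runnable sketch of that update-by-name flow, with stand-in types and persistence faked (the real code calls config.SaveProfile):

```go
package main

import "fmt"

type Node struct {
	Name string
	IP   string
}

type MachineConfig struct {
	Nodes []Node
}

// saveNodeToConfig mirrors the helper added to cmd/minikube/cmd/start.go:
// replace the stored node that shares the given node's name, then persist.
func saveNodeToConfig(cfg *MachineConfig, node *Node) error {
	for i, n := range cfg.Nodes {
		if n.Name == node.Name {
			cfg.Nodes[i] = *node
			break
		}
	}
	fmt.Printf("saved: %+v\n", cfg.Nodes) // stand-in for config.SaveProfile
	return nil
}

func main() {
	cfg := &MachineConfig{Nodes: []Node{{Name: "m01"}}}
	n := cfg.Nodes[0]
	n.IP = "192.168.99.100" // the IP is only known once the machine has booted
	if err := saveNodeToConfig(cfg, &n); err != nil {
		panic(err)
	}
}
```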
@@ -121,7 +121,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 		if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
 			t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
 		}
-		config, err := generateCfgFromFlags(cmd, k8sVersion, "none")
+		config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
 		if err != nil {
 			t.Fatalf("Got unexpected error %v during config generation", err)
 		}

@@ -64,7 +64,7 @@ var unpauseCmd = &cobra.Command{
 		exit.WithError("Failed to get command runner", err)
 	}

-	cr, err := cruntime.New(cruntime.Config{Type: cc.ContainerRuntime, Runner: r})
+	cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
 	if err != nil {
 		exit.WithError("Failed runtime", err)
 	}

@@ -38,6 +38,7 @@ type CopyableFile interface {
 	GetTargetName() string
 	GetPermissions() string
 	GetModTime() (time.Time, error)
+	Seek(int64, int) (int64, error)
 }

 // BaseAsset is the base asset class
@@ -76,7 +77,7 @@ func (b *BaseAsset) GetModTime() (time.Time, error) {
 // FileAsset is an asset using a file
 type FileAsset struct {
 	BaseAsset
-	reader io.Reader
+	reader io.ReadSeeker
 }

 // NewMemoryAssetTarget creates a new MemoryAsset, with target
@@ -91,6 +92,11 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
 	if err != nil {
 		return nil, errors.Wrapf(err, "Error opening file asset: %s", src)
 	}
+	info, err := os.Stat(src)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error getting info for %s", src)
+	}
+	r := io.NewSectionReader(f, 0, info.Size())
 	return &FileAsset{
 		BaseAsset: BaseAsset{
 			AssetName: src,
@@ -98,7 +104,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
 			TargetName:  targetName,
 			Permissions: permissions,
 		},
-		reader: f,
+		reader: r,
 	}, nil
 }

@@ -117,6 +123,7 @@ func (f *FileAsset) GetModTime() (time.Time, error) {
 	return fi.ModTime(), err
 }

+// Read reads the asset
 func (f *FileAsset) Read(p []byte) (int, error) {
 	if f.reader == nil {
 		return 0, errors.New("Error attempting FileAsset.Read, FileAsset.reader uninitialized")
@@ -124,10 +131,15 @@ func (f *FileAsset) Read(p []byte) (int, error) {
 	return f.reader.Read(p)
 }

+// Seek resets the reader to offset
+func (f *FileAsset) Seek(offset int64, whence int) (int64, error) {
+	return f.reader.Seek(offset, whence)
+}
+
 // MemoryAsset is a memory-based asset
 type MemoryAsset struct {
 	BaseAsset
-	reader io.Reader
+	reader io.ReadSeeker
 	length int
 }

@@ -141,6 +153,11 @@ func (m *MemoryAsset) Read(p []byte) (int, error) {
 	return m.reader.Read(p)
 }

+// Seek resets the reader to offset
+func (m *MemoryAsset) Seek(offset int64, whence int) (int64, error) {
+	return m.reader.Seek(offset, whence)
+}
+
 // NewMemoryAsset creates a new MemoryAsset
 func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *MemoryAsset {
 	return &MemoryAsset{
@@ -157,7 +174,7 @@ func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *Memory
 // BinAsset is a bindata (binary data) asset
 type BinAsset struct {
 	BaseAsset
-	reader   io.Reader
+	reader   io.ReadSeeker
 	template *template.Template
 	length   int
 }
@@ -253,3 +270,8 @@ func (m *BinAsset) Read(p []byte) (int, error) {
 	}
 	return m.reader.Read(p)
 }
+
+// Seek resets the reader to offset
+func (m *BinAsset) Seek(offset int64, whence int) (int64, error) {
+	return m.reader.Seek(offset, whence)
+}

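Note: CopyableFile gaining Seek (backed by io.ReadSeeker, with io.NewSectionReader wrapping the opened file) means an asset can be rewound and copied more than once, e.g. when a transfer has to be retried without reopening the source. A small self-contained illustration of why a ReadSeeker satisfies that need:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// strings.NewReader stands in for the *os.File wrapped by
	// io.NewSectionReader in NewFileAsset; both satisfy io.ReadSeeker.
	var r io.ReadSeeker = strings.NewReader("asset payload")

	first, _ := io.ReadAll(r)
	fmt.Printf("first copy:  %s\n", first)

	// Without Seek, a second copy would read zero bytes. Rewinding to the
	// start makes the asset reusable, which is what the new
	// CopyableFile.Seek(int64, int) method exposes.
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	second, _ := io.ReadAll(r)
	fmt.Printf("second copy: %s\n", second)
}
```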
@@ -43,7 +43,7 @@ type Bootstrapper interface {
 	WaitForCluster(config.MachineConfig, time.Duration) error
 	// LogCommands returns a map of log type to a command which will display that log.
 	LogCommands(LogOptions) map[string]string
-	SetupCerts(cfg config.KubernetesConfig) error
+	SetupCerts(config.KubernetesConfig, config.Node) error
 	GetKubeletStatus() (string, error)
 	GetAPIServerStatus(net.IP, int) (string, error)
 }

@@ -35,7 +35,8 @@ import (
 const remoteContainerRuntime = "remote"

 // GenerateKubeadmYAML generates the kubeadm.yaml file
-func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte, error) {
+func GenerateKubeadmYAML(mc config.MachineConfig, r cruntime.Manager) ([]byte, error) {
+	k8s := mc.KubernetesConfig
 	version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
 	if err != nil {
 		return nil, errors.Wrap(err, "parsing kubernetes version")
@@ -53,7 +54,11 @@ func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byt
 	}

 	// In case of no port assigned, use default
-	nodePort := k8s.NodePort
+	cp, err := config.PrimaryControlPlane(mc)
+	if err != nil {
+		return nil, errors.Wrap(err, "getting control plane")
+	}
+	nodePort := cp.Port
 	if nodePort <= 0 {
 		nodePort = constants.APIServerPort
 	}
@@ -77,11 +82,11 @@ func GenerateKubeadmYAML(k8s config.KubernetesConfig, r cruntime.Manager) ([]byt
 		CertDir:           vmpath.GuestCertsDir,
 		ServiceCIDR:       constants.DefaultServiceCIDR,
 		PodSubnet:         k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm),
-		AdvertiseAddress:  k8s.NodeIP,
+		AdvertiseAddress:  cp.IP,
 		APIServerPort:     nodePort,
 		KubernetesVersion: k8s.KubernetesVersion,
 		EtcdDataDir:       EtcdDataDir(),
-		NodeName:          k8s.NodeName,
+		NodeName:          cp.Name,
 		CRISocket:         r.SocketPath(),
 		ImageRepository:   k8s.ImageRepository,
 		ExtraArgs:         extraComponentConfig,

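Note: the kubeadm template parameters that previously came straight from KubernetesConfig (NodeIP, NodeName, NodePort) are now resolved through the primary control-plane node, keeping the same default-port fallback. A hedged sketch of that resolution step in isolation (the 8443 constant is an assumption standing in for constants.APIServerPort):

```go
package main

import "fmt"

const defaultAPIServerPort = 8443 // assumption: mirrors constants.APIServerPort

type node struct {
	Name string
	IP   string
	Port int
}

// resolveAPIServer reproduces the new logic: advertise address, node name and
// port come from the control-plane node, falling back to the default port
// when none was assigned.
func resolveAPIServer(cp node) (addr string, name string, port int) {
	port = cp.Port
	if port <= 0 {
		port = defaultAPIServerPort
	}
	return cp.IP, cp.Name, port
}

func main() {
	// Node created without an explicit port: the fallback kicks in.
	addr, name, port := resolveAPIServer(node{Name: "m01", IP: "192.168.99.100"})
	fmt.Println(addr, name, port) // 192.168.99.100 m01 8443
}
```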
@@ -106,9 +106,9 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
 		name      string
 		runtime   string
 		shouldErr bool
-		cfg       config.KubernetesConfig
+		cfg       config.MachineConfig
 	}{
-		{"dns", "docker", false, config.KubernetesConfig{DNSDomain: "1.1.1.1"}},
+		{"dns", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}},
 	}
 	for _, version := range versions {
 		for _, tc := range tests {
@@ -119,9 +119,14 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
 			tname := tc.name + "_" + version
 			t.Run(tname, func(t *testing.T) {
 				cfg := tc.cfg
-				cfg.NodeIP = "1.1.1.1"
-				cfg.NodeName = "mk"
-				cfg.KubernetesVersion = version + ".0"
+				cfg.Nodes = []config.Node{
+					config.Node{
+						IP:           "1.1.1.1",
+						Name:         "mk",
+						ControlPlane: true,
+					},
+				}
+				cfg.KubernetesConfig.KubernetesVersion = version + ".0"

 				got, err := GenerateKubeadmYAML(cfg, runtime)
 				if err != nil && !tc.shouldErr {
@@ -166,17 +171,17 @@ func TestGenerateKubeadmYAML(t *testing.T) {
 		name      string
 		runtime   string
 		shouldErr bool
-		cfg       config.KubernetesConfig
+		cfg       config.MachineConfig
 	}{
-		{"default", "docker", false, config.KubernetesConfig{}},
-		{"containerd", "containerd", false, config.KubernetesConfig{}},
-		{"crio", "crio", false, config.KubernetesConfig{}},
-		{"options", "docker", false, config.KubernetesConfig{ExtraOptions: extraOpts}},
-		{"crio-options-gates", "crio", false, config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}},
-		{"unknown-component", "docker", true, config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}},
-		{"containerd-api-port", "containerd", false, config.KubernetesConfig{NodePort: 12345}},
-		{"containerd-pod-network-cidr", "containerd", false, config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}},
-		{"image-repository", "docker", false, config.KubernetesConfig{ImageRepository: "test/repo"}},
+		{"default", "docker", false, config.MachineConfig{}},
+		{"containerd", "containerd", false, config.MachineConfig{}},
+		{"crio", "crio", false, config.MachineConfig{}},
+		{"options", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}},
+		{"crio-options-gates", "crio", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}},
+		{"unknown-component", "docker", true, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}},
+		{"containerd-api-port", "containerd", false, config.MachineConfig{Nodes: []config.Node{config.Node{Port: 12345}}}},
+		{"containerd-pod-network-cidr", "containerd", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}},
+		{"image-repository", "docker", false, config.MachineConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}},
 	}
 	for _, version := range versions {
 		for _, tc := range tests {
@@ -187,9 +192,21 @@ func TestGenerateKubeadmYAML(t *testing.T) {
 			tname := tc.name + "_" + version
 			t.Run(tname, func(t *testing.T) {
 				cfg := tc.cfg
-				cfg.NodeIP = "1.1.1.1"
-				cfg.NodeName = "mk"
-				cfg.KubernetesVersion = version + ".0"
+
+				if len(cfg.Nodes) > 0 {
+					cfg.Nodes[0].IP = "1.1.1.1"
+					cfg.Nodes[0].Name = "mk"
+					cfg.Nodes[0].ControlPlane = true
+				} else {
+					cfg.Nodes = []config.Node{
+						config.Node{
+							IP:           "1.1.1.1",
+							Name:         "mk",
+							ControlPlane: true,
+						},
+					}
+				}
+				cfg.KubernetesConfig.KubernetesVersion = version + ".0"

 				got, err := GenerateKubeadmYAML(cfg, runtime)
 				if err != nil && !tc.shouldErr {

@@ -30,7 +30,8 @@ import (

 // NewKubeletConfig generates a new systemd unit containing a configured kubelet
 // based on the options present in the KubernetesConfig.
-func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte, error) {
+func NewKubeletConfig(mc config.MachineConfig, r cruntime.Manager) ([]byte, error) {
+	k8s := mc.KubernetesConfig
 	version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
 	if err != nil {
 		return nil, errors.Wrap(err, "parsing kubernetes version")
@@ -52,8 +53,12 @@ func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) ([]byte,
 	if k8s.NetworkPlugin != "" {
 		extraOpts["network-plugin"] = k8s.NetworkPlugin
 	}
+	cp, err := config.PrimaryControlPlane(mc)
+	if err != nil {
+		return nil, errors.Wrap(err, "getting master node")
+	}
 	if _, ok := extraOpts["node-ip"]; !ok {
-		extraOpts["node-ip"] = k8s.NodeIP
+		extraOpts["node-ip"] = cp.IP
 	}

 	pauseImage := images.Pause(k8s.ImageRepository)

@@ -30,18 +30,25 @@ import (
 func TestGenerateKubeletConfig(t *testing.T) {
 	tests := []struct {
 		description string
-		cfg         config.KubernetesConfig
+		cfg         config.MachineConfig
 		expected    string
 		shouldErr   bool
 	}{
 		{
 			description: "old docker",
-			cfg: config.KubernetesConfig{
-				NodeIP: "192.168.1.100",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
 					KubernetesVersion: constants.OldestKubernetesVersion,
 					NodeName:          "minikube",
 					ContainerRuntime:  "docker",
 				},
+				Nodes: []config.Node{
+					config.Node{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
 			expected: `[Unit]
 Wants=docker.socket

@@ -54,12 +61,19 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --
 		},
 		{
 			description: "newest cri runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP: "192.168.1.100",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
 					KubernetesVersion: constants.NewestKubernetesVersion,
 					NodeName:          "minikube",
 					ContainerRuntime:  "cri-o",
 				},
+				Nodes: []config.Node{
+					config.Node{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
 			expected: `[Unit]
 Wants=crio.service

@@ -72,12 +86,19 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo
 		},
 		{
 			description: "default containerd runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP: "192.168.1.100",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
 					KubernetesVersion: constants.DefaultKubernetesVersion,
 					NodeName:          "minikube",
 					ContainerRuntime:  "containerd",
 				},
+				Nodes: []config.Node{
+					config.Node{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
 			expected: `[Unit]
 Wants=containerd.service

@@ -89,11 +110,10 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo
 `,
 		},
 		{
-			description: "default containerd runtime",
-			cfg: config.KubernetesConfig{
-				NodeIP: "192.168.1.100",
+			description: "default containerd runtime with IP override",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
 					KubernetesVersion: constants.DefaultKubernetesVersion,
 					NodeName:          "minikube",
 					ContainerRuntime:  "containerd",
 					ExtraOptions: config.ExtraOptionSlice{
 						config.ExtraOption{
@@ -103,6 +123,14 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo
 						},
 					},
 				},
+				Nodes: []config.Node{
+					config.Node{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
 			expected: `[Unit]
 Wants=containerd.service

@@ -115,13 +143,20 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo
 		},
 		{
 			description: "docker with custom image repository",
-			cfg: config.KubernetesConfig{
-				NodeIP: "192.168.1.100",
+			cfg: config.MachineConfig{
+				KubernetesConfig: config.KubernetesConfig{
 					KubernetesVersion: constants.DefaultKubernetesVersion,
 					NodeName:          "minikube",
 					ContainerRuntime:  "docker",
 					ImageRepository:   "docker-proxy-image.io/google_containers",
 				},
+				Nodes: []config.Node{
+					config.Node{
+						IP:           "192.168.1.100",
+						Name:         "minikube",
+						ControlPlane: true,
+					},
+				},
+			},
 			expected: `[Unit]
 Wants=docker.socket

@@ -136,7 +171,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.2/kubelet --authorization-mode=Webhoo

 	for _, tc := range tests {
 		t.Run(tc.description, func(t *testing.T) {
-			runtime, err := cruntime.New(cruntime.Config{Type: tc.cfg.ContainerRuntime,
+			runtime, err := cruntime.New(cruntime.Config{Type: tc.cfg.KubernetesConfig.ContainerRuntime,
 				Runner: command.NewFakeCommandRunner()})
 			if err != nil {
 				t.Fatalf("runtime: %v", err)

@@ -60,10 +60,10 @@ var (
 )

 // SetupCerts gets the generated credentials required to talk to the APIServer.
-func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig) error {
+func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) error {

 	localPath := localpath.MiniPath()
-	glog.Infof("Setting up %s for IP: %s\n", localPath, k8s.NodeIP)
+	glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP)

 	// WARNING: This function was not designed for multiple profiles, so it is VERY racey:
 	//
@@ -79,7 +79,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig) error {
 	}
 	defer releaser.Release()

-	if err := generateCerts(k8s); err != nil {
+	if err := generateCerts(k8s, n); err != nil {
 		return errors.Wrap(err, "Error generating certs")
 	}
 	copyableFiles := []assets.CopyableFile{}
@@ -110,8 +110,8 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig) error {
 	}

 	kcs := &kubeconfig.Settings{
-		ClusterName:          k8s.NodeName,
-		ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(k8s.NodePort))),
+		ClusterName:          n.Name,
+		ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))),
 		ClientCertificate:    path.Join(vmpath.GuestCertsDir, "apiserver.crt"),
 		ClientKey:            path.Join(vmpath.GuestCertsDir, "apiserver.key"),
 		CertificateAuthority: path.Join(vmpath.GuestCertsDir, "ca.crt"),
@@ -144,7 +144,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig) error {
 	return nil
 }

-func generateCerts(k8s config.KubernetesConfig) error {
+func generateCerts(k8s config.KubernetesConfig, n config.Node) error {
 	serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR)
 	if err != nil {
 		return errors.Wrap(err, "getting service cluster ip")
@@ -176,7 +176,7 @@ func generateCerts(k8s config.KubernetesConfig) error {

 	apiServerIPs := append(
 		k8s.APIServerIPs,
-		[]net.IP{net.ParseIP(k8s.NodeIP), serviceIP, net.ParseIP(kic.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...)
+		[]net.IP{net.ParseIP(n.IP), serviceIP, net.ParseIP(kic.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...)
 	apiServerNames := append(k8s.APIServerNames, k8s.APIServerName)
 	apiServerAlternateNames := append(
 		apiServerNames,

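Note: certificate SANs are now seeded from the node's IP rather than KubernetesConfig.NodeIP. A compact standard-library sketch of how the alternate-IP list is assembled (the 127.0.0.1 value is an assumption standing in for kic.DefaultBindIPV4; 10.96.0.1 is just an example service-CIDR address):

```go
package main

import (
	"fmt"
	"net"
)

type node struct{ IP string }

func main() {
	n := node{IP: "192.168.99.100"}
	serviceIP := net.ParseIP("10.96.0.1")     // example: first IP of the service CIDR
	defaultBind := net.ParseIP("127.0.0.1")   // assumption: kic.DefaultBindIPV4
	extra := []net.IP{net.ParseIP("8.8.8.8")} // user-supplied --apiserver-ips

	// Mirrors generateCerts: the node IP joins the user-provided list so the
	// apiserver certificate stays valid for every address clients may dial.
	apiServerIPs := append(
		extra,
		[]net.IP{net.ParseIP(n.IP), serviceIP, defaultBind, net.ParseIP("10.0.0.1")}...)

	for _, ip := range apiServerIPs {
		fmt.Println(ip)
	}
}
```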
@@ -65,7 +65,7 @@ func TestSetupCerts(t *testing.T) {
 	filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "ca.crt"))
 	filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "certs", "mycert.pem"))

-	if err := SetupCerts(f, k8s); err != nil {
+	if err := SetupCerts(f, k8s, config.Node{}); err != nil {
 		t.Fatalf("Error starting cluster: %v", err)
 	}
 	for _, cert := range filesToBeTransferred {

@@ -39,7 +39,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/drivers/kic"
-	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/kapi"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
@@ -181,9 +180,6 @@ func (k *Bootstrapper) createCompatSymlinks() error {

 // StartCluster starts the cluster
 func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error {
-	if driver.IsKIC(cfg.VMDriver) {
-		cfg.KubernetesConfig.NodeIP = kic.DefaultBindIPV4
-	}
 	err := bsutil.ExistingConfig(k.c)
 	if err == nil { // if there is an existing cluster don't reconfigure it
 		return k.restartCluster(cfg)
@@ -207,6 +203,11 @@ func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error {
 		return err
 	}

+	cp, err := config.PrimaryControlPlane(cfg)
+	if err != nil {
+		return err
+	}
+
 	ignore := []string{
 		fmt.Sprintf("DirAvailable-%s", strings.Replace(vmpath.GuestManifestsDir, "/", "-", -1)),
 		fmt.Sprintf("DirAvailable-%s", strings.Replace(vmpath.GuestPersistentDir, "/", "-", -1)),
@@ -248,7 +249,7 @@ func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error {
 	if !driver.IsKIC(cfg.VMDriver) { // TODO: skip for both after verifications https://github.com/kubernetes/minikube/issues/6239
 		glog.Infof("Configuring cluster permissions ...")
 		elevate := func() error {
-			client, err := k.client(cfg)
+			client, err := k.client(cp)
 			if err != nil {
 				return err
 			}
@@ -268,23 +269,22 @@ func (k *Bootstrapper) StartCluster(cfg config.MachineConfig) error {
 }

 // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver
-func (k *Bootstrapper) client(cfg config.MachineConfig) (*kubernetes.Clientset, error) {
+func (k *Bootstrapper) client(n config.Node) (*kubernetes.Clientset, error) {
 	if k.k8sClient != nil {
 		return k.k8sClient, nil
 	}

-	config, err := kapi.ClientConfig(k.contextName)
+	cc, err := kapi.ClientConfig(k.contextName)
 	if err != nil {
 		return nil, errors.Wrap(err, "client config")
 	}

-	ip, port := k.clientEndpointAddr(cfg)
-	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(ip, strconv.Itoa(port)))
-	if config.Host != endpoint {
-		glog.Errorf("Overriding stale ClientConfig host %s with %s", config.Host, endpoint)
-		config.Host = endpoint
+	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(n.IP, strconv.Itoa(n.Port)))
+	if cc.Host != endpoint {
+		glog.Errorf("Overriding stale ClientConfig host %s with %s", cc.Host, endpoint)
+		cc.Host = endpoint
 	}
-	c, err := kubernetes.NewForConfig(config)
+	c, err := kubernetes.NewForConfig(cc)
 	if err == nil {
 		k.k8sClient = c
 	}
@@ -294,16 +294,19 @@ func (k *Bootstrapper) client(cfg config.MachineConfig) (*kubernetes.Clientset,
 // WaitForCluster blocks until the cluster appears to be healthy
 func (k *Bootstrapper) WaitForCluster(cfg config.MachineConfig, timeout time.Duration) error {
 	start := time.Now()
-	ip, port := k.clientEndpointAddr(cfg)
 	out.T(out.Waiting, "Waiting for cluster to come online ...")
+	cp, err := config.PrimaryControlPlane(cfg)
+	if err != nil {
+		return err
+	}
 	if err := kverify.APIServerProcess(k.c, start, timeout); err != nil {
 		return err
 	}
-	if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil {
+	if err := kverify.APIServerIsRunning(start, cp.IP, cp.Port, timeout); err != nil {
 		return err
 	}

-	c, err := k.client(cfg)
+	c, err := k.client(cp)
 	if err != nil {
 		return errors.Wrap(err, "get k8s client")
 	}
@@ -357,7 +360,8 @@ func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error {
 		return errors.Wrap(err, "apiserver healthz")
 	}

-	client, err := k.client(cfg)
+	for _, n := range cfg.Nodes {
+		client, err := k.client(n)
 		if err != nil {
 			return errors.Wrap(err, "getting k8s client")
 		}
@@ -374,6 +378,7 @@ func (k *Bootstrapper) restartCluster(cfg config.MachineConfig) error {
 		if err := bsutil.AdjustResourceLimits(k.c); err != nil {
 			glog.Warningf("unable to adjust resource limits: %v", err)
 		}
+	}
 	return nil
 }

@@ -414,8 +419,8 @@ func (k *Bootstrapper) PullImages(k8s config.KubernetesConfig) error {
 }

 // SetupCerts sets up certificates within the cluster.
-func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig) error {
-	return bootstrapper.SetupCerts(k.c, k8s)
+func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error {
+	return bootstrapper.SetupCerts(k.c, k8s, n)
 }

 // UpdateCluster updates the cluster
@@ -430,17 +435,17 @@ func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error {
 			out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err})
 		}
 	}
-	r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime,
+	r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime,
 		Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket})
 	if err != nil {
 		return errors.Wrap(err, "runtime")
 	}
-	kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg.KubernetesConfig, r)
+	kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r)
 	if err != nil {
 		return errors.Wrap(err, "generating kubeadm cfg")
 	}

-	kubeletCfg, err := bsutil.NewKubeletConfig(cfg.KubernetesConfig, r)
+	kubeletCfg, err := bsutil.NewKubeletConfig(cfg, r)
 	if err != nil {
 		return errors.Wrap(err, "generating kubelet config")
 	}
@@ -462,7 +467,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.MachineConfig) error {
 		return errors.Wrap(err, "downloading binaries")
 	}

-	var cniFile []byte = nil
+	var cniFile []byte
 	if cfg.KubernetesConfig.EnableDefaultCNI {
 		cniFile = []byte(defaultCNIConfig)
 	}
@@ -509,15 +514,3 @@ func (k *Bootstrapper) applyKicOverlay(cfg config.MachineConfig) error {
 	}
 	return nil
 }
-
-// clientEndpointAddr returns ip and port accessible for the kubernetes clients to talk to the cluster
-func (k *Bootstrapper) clientEndpointAddr(cfg config.MachineConfig) (string, int) {
-	if driver.IsKIC(cfg.VMDriver) { // for kic we ask docker/podman what port it assigned to node port
-		p, err := oci.HostPortBinding(cfg.VMDriver, cfg.Name, cfg.KubernetesConfig.NodePort)
-		if err != nil {
-			glog.Warningf("Error getting host bind port %q for api server for %q driver: %v ", p, cfg.VMDriver, err)
-		}
-		return kic.DefaultBindIPV4, p
-	}
-	return cfg.KubernetesConfig.NodeIP, cfg.KubernetesConfig.NodePort
-}

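Note: the bootstrapper's client() now takes a Node and builds the apiserver endpoint directly from it, which is what lets restartCluster loop over cfg.Nodes. A runnable sketch of the endpoint construction with stand-in types (the real code goes on to build a *kubernetes.Clientset):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

type node struct {
	IP   string
	Port int
}

// endpointFor reproduces the address the new client(n config.Node) dials;
// net.JoinHostPort keeps IPv6 addresses bracketed correctly.
func endpointFor(n node) string {
	return fmt.Sprintf("https://%s", net.JoinHostPort(n.IP, strconv.Itoa(n.Port)))
}

func main() {
	nodes := []node{
		{IP: "192.168.99.100", Port: 8443},
		{IP: "fd00::10", Port: 8443},
	}
	// restartCluster-style iteration: one client endpoint per node.
	for _, n := range nodes {
		fmt.Println(endpointFor(n))
	}
}
```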
@ -49,7 +49,6 @@ import (
|
|||
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
cfg "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
|
@ -97,44 +96,44 @@ func init() {
|
|||
}
|
||||
|
||||
// CacheISO downloads and caches ISO.
|
||||
func CacheISO(config cfg.MachineConfig) error {
|
||||
if driver.BareMetal(config.VMDriver) {
|
||||
func CacheISO(cfg config.MachineConfig) error {
|
||||
if driver.BareMetal(cfg.VMDriver) {
|
||||
return nil
|
||||
}
|
||||
return config.Downloader.CacheMinikubeISOFromURL(config.MinikubeISO)
|
||||
return cfg.Downloader.CacheMinikubeISOFromURL(cfg.MinikubeISO)
|
||||
}
|
||||
|
||||
// StartHost starts a host VM.
|
||||
func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) {
|
||||
func StartHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) {
|
||||
// Prevent machine-driver boot races, as well as our own certificate race
|
||||
releaser, err := acquireMachinesLock(config.Name)
|
||||
releaser, err := acquireMachinesLock(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "boot lock")
|
||||
}
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
glog.Infof("releasing machines lock for %q, held for %s", config.Name, time.Since(start))
|
||||
glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start))
|
||||
releaser.Release()
|
||||
}()
|
||||
|
||||
exists, err := api.Exists(config.Name)
|
||||
exists, err := api.Exists(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "exists: %s", config.Name)
|
||||
return nil, errors.Wrapf(err, "exists: %s", cfg.Name)
|
||||
}
|
||||
if !exists {
|
||||
glog.Infoln("Machine does not exist... provisioning new machine")
|
||||
glog.Infof("Provisioning machine with config: %+v", config)
|
||||
return createHost(api, config)
|
||||
glog.Infof("Provisioning machine with config: %+v", cfg)
|
||||
return createHost(api, cfg)
|
||||
}
|
||||
|
||||
glog.Infoln("Skipping create...Using existing machine configuration")
|
||||
|
||||
h, err := api.Load(config.Name)
|
||||
h, err := api.Load(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
|
||||
}
|
||||
|
||||
if exists && config.Name == constants.DefaultMachineName {
|
||||
if exists && cfg.Name == constants.DefaultMachineName {
|
||||
out.T(out.Tip, "Tip: Use 'minikube start -p <name>' to create a new cluster, or 'minikube delete' to delete this one.")
|
||||
}
|
||||
|
||||
|
@ -145,9 +144,9 @@ func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error)
|
|||
}
|
||||
|
||||
if s == state.Running {
|
||||
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" VM ...`, out.V{"driver_name": config.VMDriver, "profile_name": config.Name})
|
||||
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" VM ...`, out.V{"driver_name": cfg.VMDriver, "profile_name": cfg.Name})
|
||||
} else {
|
||||
out.T(out.Restarting, `Starting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": config.VMDriver, "profile_name": config.Name})
|
||||
out.T(out.Restarting, `Starting existing {{.driver_name}} VM for "{{.profile_name}}" ...`, out.V{"driver_name": cfg.VMDriver, "profile_name": cfg.Name})
|
||||
if err := h.Driver.Start(); err != nil {
|
||||
return nil, errors.Wrap(err, "start")
|
||||
}
|
||||
|
@ -156,7 +155,7 @@ func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error)
|
|||
}
|
||||
}
|
||||
|
||||
e := engineOptions(config)
|
||||
e := engineOptions(cfg)
|
||||
glog.Infof("engine options: %+v", e)
|
||||
|
||||
out.T(out.Waiting, "Waiting for the host to be provisioned ...")
|
||||
|
@ -383,12 +382,12 @@ func GetHostDriverIP(api libmachine.API, machineName string) (net.IP, error) {
|
|||
return ip, nil
|
||||
}
|
||||
|
||||
func engineOptions(config cfg.MachineConfig) *engine.Options {
|
||||
func engineOptions(cfg config.MachineConfig) *engine.Options {
|
||||
o := engine.Options{
|
||||
Env: config.DockerEnv,
|
||||
InsecureRegistry: append([]string{constants.DefaultServiceCIDR}, config.InsecureRegistry...),
|
||||
RegistryMirror: config.RegistryMirror,
|
||||
ArbitraryFlags: config.DockerOpt,
|
||||
Env: cfg.DockerEnv,
|
||||
InsecureRegistry: append([]string{constants.DefaultServiceCIDR}, cfg.InsecureRegistry...),
|
||||
RegistryMirror: cfg.RegistryMirror,
|
||||
ArbitraryFlags: cfg.DockerOpt,
|
||||
InstallURL: drivers.DefaultEngineInstallURL,
|
||||
}
|
||||
return &o
|
||||
|
@ -463,48 +462,48 @@ func showRemoteOsRelease(driver drivers.Driver) {
|
|||
}
|
||||
|
||||
// showHostInfo shows host information
|
||||
func showHostInfo(config cfg.MachineConfig) {
|
||||
if driver.BareMetal(config.VMDriver) {
|
||||
func showHostInfo(cfg config.MachineConfig) {
|
||||
if driver.BareMetal(cfg.VMDriver) {
|
||||
info, err := getHostInfo()
|
||||
if err == nil {
|
||||
out.T(out.StartingNone, "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"number_of_cpus": info.CPUs, "memory_size": info.Memory, "disk_size": info.DiskSize})
|
||||
}
|
||||
} else if driver.IsKIC(config.VMDriver) {
|
||||
} else if driver.IsKIC(cfg.VMDriver) {
|
||||
info, err := getHostInfo() // TODO medyagh: get docker-machine info for non linux
|
||||
if err == nil {
|
||||
out.T(out.StartingVM, "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...", out.V{"driver_name": config.VMDriver, "number_of_cpus": config.CPUs, "number_of_host_cpus": info.CPUs, "memory_size": config.Memory, "host_memory_size": info.Memory})
|
||||
out.T(out.StartingVM, "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...", out.V{"driver_name": cfg.VMDriver, "number_of_cpus": cfg.CPUs, "number_of_host_cpus": info.CPUs, "memory_size": cfg.Memory, "host_memory_size": info.Memory})
|
||||
}
|
||||
} else {
|
||||
out.T(out.StartingVM, "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"driver_name": config.VMDriver, "number_of_cpus": config.CPUs, "memory_size": config.Memory, "disk_size": config.DiskSize})
|
||||
out.T(out.StartingVM, "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", out.V{"driver_name": cfg.VMDriver, "number_of_cpus": cfg.CPUs, "memory_size": cfg.Memory, "disk_size": cfg.DiskSize})
|
||||
}
|
||||
}
|
||||
|
||||
func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) {
|
||||
if config.VMDriver == driver.VMwareFusion && viper.GetBool(cfg.ShowDriverDeprecationNotification) {
|
||||
func createHost(api libmachine.API, cfg config.MachineConfig) (*host.Host, error) {
|
||||
if cfg.VMDriver == driver.VMwareFusion && viper.GetBool(config.ShowDriverDeprecationNotification) {
|
||||
out.WarningT(`The vmwarefusion driver is deprecated and support for it will be removed in a future release.
|
||||
Please consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.
|
||||
See https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.
|
||||
To disable this message, run [minikube config set ShowDriverDeprecationNotification false]`)
|
||||
}
|
||||
showHostInfo(config)
|
||||
def := registry.Driver(config.VMDriver)
|
||||
showHostInfo(cfg)
|
||||
def := registry.Driver(cfg.VMDriver)
|
||||
if def.Empty() {
|
||||
return nil, fmt.Errorf("unsupported/missing driver: %s", config.VMDriver)
|
||||
return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.VMDriver)
|
||||
}
|
||||
dd := def.Config(config)
|
||||
dd := def.Config(cfg)
|
||||
data, err := json.Marshal(dd)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "marshal")
|
||||
}
|
||||
|
||||
h, err := api.NewHost(config.VMDriver, data)
|
||||
h, err := api.NewHost(cfg.VMDriver, data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new host")
|
||||
}
|
||||
|
||||
h.HostOptions.AuthOptions.CertDir = localpath.MiniPath()
|
||||
h.HostOptions.AuthOptions.StorePath = localpath.MiniPath()
|
||||
h.HostOptions.EngineOptions = engineOptions(config)
|
||||
h.HostOptions.EngineOptions = engineOptions(cfg)
|
||||
|
||||
if err := api.Create(h); err != nil {
|
||||
// Wait for all the logs to reach the client
|
||||
|
@@ -516,9 +515,9 @@ func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error
        return h, errors.Wrap(err, "required directories")
    }

    if driver.BareMetal(config.VMDriver) {
    if driver.BareMetal(cfg.VMDriver) {
        showLocalOsRelease()
    } else if !driver.BareMetal(config.VMDriver) && !driver.IsKIC(config.VMDriver) {
    } else if !driver.BareMetal(cfg.VMDriver) && !driver.IsKIC(cfg.VMDriver) {
        showRemoteOsRelease(h.Driver)
        // Ensure that even new VM's have proper time synchronization up front
        // It's 2019, and I can't believe I am still dealing with time desync as a problem.

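The signature flip above (config cfg.MachineConfig to cfg config.MachineConfig) is not cosmetic: the function body now refers to the config package directly, so a parameter named config would shadow the package identifier. A minimal, self-contained illustration of that shadowing, using stand-in names rather than minikube code:

    package main

    import "fmt"

    // config stands in for the imported package identifier; in start.go it is
    // the k8s.io/minikube/pkg/minikube/config package itself.
    var config = "package-level identifier"

    // A parameter named `config` hides the outer identifier for the whole body,
    // so package-level names such as config.ShowDriverDeprecationNotification
    // would no longer resolve; hence the swap to `cfg config.MachineConfig`.
    func createHost(config string) {
        fmt.Println(config) // the parameter, not the outer `config`
    }

    func main() {
        createHost("parameter shadows the outer name")
        fmt.Println(config) // the package-level stand-in again
    }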
@@ -0,0 +1,36 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

// AddNode adds a new node config to an existing cluster.
func AddNode(cc *MachineConfig, name string, controlPlane bool, k8sVersion string, profileName string) error {
    node := Node{
        Name:   name,
        Worker: true,
    }

    if controlPlane {
        node.ControlPlane = true
    }

    if k8sVersion != "" {
        node.KubernetesVersion = k8sVersion
    }

    cc.Nodes = append(cc.Nodes, node)
    return SaveProfile(profileName, cc)
}
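A sketch of how the new helper is meant to be driven, assuming it runs inside the minikube tree and that config.Load returns a *MachineConfig as it does elsewhere in this change; the profile and node names are made up:

    package main

    import (
        "log"

        "k8s.io/minikube/pkg/minikube/config"
    )

    func main() {
        // Load the existing cluster config for the profile.
        cc, err := config.Load("minikube")
        if err != nil {
            log.Fatal(err)
        }
        // Append a worker node and persist the profile via SaveProfile.
        if err := config.AddNode(cc, "node-2", false, "v1.17.0", "minikube"); err != nil {
            log.Fatal(err)
        }
    }

Worth noting: AddNode always sets Worker: true, so a control-plane node added this way is also a worker, and an empty k8sVersion leaves Node.KubernetesVersion blank, which the reworked IsValid below rejects.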
@@ -18,6 +18,7 @@ package config

import (
    "encoding/json"
    "errors"
    "io/ioutil"
    "os"
    "path/filepath"
@@ -35,22 +36,31 @@ func (p *Profile) IsValid() bool {
    if p.Config == nil {
        return false
    }
    if len(p.Config) == 0 {
    if p.Config == nil {
        return false
    }
    // This will become a loop for multinode
    if p.Config[0] == nil {
    if p.Config.VMDriver == "" {
        return false
    }
    if p.Config[0].VMDriver == "" {
    for _, n := range p.Config.Nodes {
        if n.KubernetesVersion == "" {
            return false
        }
    if p.Config[0].KubernetesConfig.KubernetesVersion == "" {
        return false
    }
    return true
}

// PrimaryControlPlane gets the node-specific config for the first created control plane
func PrimaryControlPlane(cc MachineConfig) (Node, error) {
    for _, n := range cc.Nodes {
        if n.ControlPlane {
            return n, nil
        }
    }

    return Node{}, errors.New("could not find master node")
}

// ProfileNameInReservedKeywords checks if the profile name is one of the internal keywords
func ProfileNameInReservedKeywords(name string) bool {
    for _, v := range keywords {
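PrimaryControlPlane is the lookup every caller now goes through to find the API server endpoint, replacing direct reads of KubernetesConfig.NodeIP and NodePort. A caller-side sketch, again assuming the minikube tree and a made-up profile name:

    package main

    import (
        "fmt"
        "log"

        "k8s.io/minikube/pkg/minikube/config"
    )

    func main() {
        cc, err := config.Load("minikube")
        if err != nil {
            log.Fatal(err)
        }
        cp, err := config.PrimaryControlPlane(*cc)
        if err != nil {
            log.Fatal(err) // a valid profile should carry at least one control-plane node
        }
        fmt.Printf("API server at %s:%d\n", cp.IP, cp.Port)
    }

One nit: the error text still says "master node" while the function name speaks of a control plane.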
@@ -76,11 +86,11 @@ func ProfileExists(name string, miniHome ...string) bool {

// CreateEmptyProfile creates an empty profile and stores in $MINIKUBE_HOME/profiles/<profilename>/config.json
func CreateEmptyProfile(name string, miniHome ...string) error {
    cfg := &MachineConfig{}
    return CreateProfile(name, cfg, miniHome...)
    return SaveProfile(name, cfg, miniHome...)
}

// CreateProfile creates a profile out of the cfg and stores in $MINIKUBE_HOME/profiles/<profilename>/config.json
func CreateProfile(name string, cfg *MachineConfig, miniHome ...string) error {
// SaveProfile creates a profile out of the cfg and stores it in $MINIKUBE_HOME/profiles/<profilename>/config.json
func SaveProfile(name string, cfg *MachineConfig, miniHome ...string) error {
    data, err := json.MarshalIndent(cfg, "", "    ")
    if err != nil {
        return err
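The rename from CreateProfile to SaveProfile reflects that the same function now both creates and updates profiles (AddNode above relies on this to persist appended nodes). A sketch of the create-then-update flow, with hypothetical profile name and field values:

    package main

    import (
        "log"

        "k8s.io/minikube/pkg/minikube/config"
    )

    func main() {
        // First write: an empty skeleton, as before.
        if err := config.CreateEmptyProfile("demo"); err != nil {
            log.Fatal(err)
        }
        // Later writes reuse SaveProfile to persist mutations of the same config.
        cc, err := config.Load("demo")
        if err != nil {
            log.Fatal(err)
        }
        cc.VMDriver = "docker"
        if err := config.SaveProfile("demo", cc); err != nil {
            log.Fatal(err)
        }
    }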
@@ -160,7 +170,7 @@ func LoadProfile(name string, miniHome ...string) (*Profile, error) {
    cfg, err := DefaultLoader.LoadConfigFromFile(name, miniHome...)
    p := &Profile{
        Name:   name,
        Config: []*MachineConfig{cfg},
        Config: cfg,
    }
    return p, err
}

@@ -54,8 +54,8 @@ func TestListProfiles(t *testing.T) {
        if val[tt.index].Name != tt.expectName {
            t.Errorf("expected %s got %v", tt.expectName, val[tt.index].Name)
        }
        if val[tt.index].Config[0].VMDriver != tt.vmDriver {
            t.Errorf("expected %s got %v", tt.vmDriver, val[tt.index].Config[0].VMDriver)
        if val[tt.index].Config.VMDriver != tt.vmDriver {
            t.Errorf("expected %s got %v", tt.vmDriver, val[tt.index].Config.VMDriver)
        }

    }
@@ -176,7 +176,7 @@ func TestCreateProfile(t *testing.T) {
    }
    for _, tc := range testCases {
        n := tc.name // capturing loop variable
        gotErr := CreateProfile(n, tc.cfg, miniDir)
        gotErr := SaveProfile(n, tc.cfg, miniDir)
        if gotErr != nil && tc.expectErr == false {
            t.Errorf("expected CreateEmptyProfile not to error but got err=%v", gotErr)
        }

@@ -27,7 +27,7 @@ import (
type Profile struct {
    Name   string
    Status string // running, stopped
    Config []*MachineConfig
    Config *MachineConfig
}

// MachineConfig contains the parameters used to start a cluster.
@@ -40,7 +40,6 @@ type MachineConfig struct {
    CPUs               int
    DiskSize           int
    VMDriver           string
    ContainerRuntime   string
    HyperkitVpnKitSock string   // Only used by the Hyperkit driver
    HyperkitVSockPorts []string // Only used by the Hyperkit driver
    DockerEnv          []string // Each entry is formatted as KEY=VALUE.
@@ -61,18 +60,16 @@ type MachineConfig struct {
    NoVTXCheck       bool // Only used by virtualbox
    DNSProxy         bool // Only used by virtualbox
    HostDNSResolver  bool // Only used by virtualbox
    KubernetesConfig KubernetesConfig
    HostOnlyNicType  string // Only used by virtualbox
    NatNicType       string // Only used by virtualbox
    KubernetesConfig KubernetesConfig
    Nodes            []Node
    Addons           map[string]bool
}

// KubernetesConfig contains the parameters used to configure the VM Kubernetes.
type KubernetesConfig struct {
    KubernetesVersion string
    NodeIP            string
    NodePort          int // kubernetes api server port
    NodeName          string
    APIServerName     string
    APIServerNames    []string
    APIServerIPs      []net.IP
@@ -89,6 +86,16 @@ type KubernetesConfig struct {
    EnableDefaultCNI bool
}

// Node contains information about specific nodes in a cluster
type Node struct {
    Name              string
    IP                string
    Port              int
    KubernetesVersion string
    ControlPlane      bool
    Worker            bool
}

// VersionedExtraOption holds information on flags to apply to a specific range
// of versions
type VersionedExtraOption struct {

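Taken together, the struct changes above move the per-node data (IP, Port, KubernetesVersion) off KubernetesConfig and onto the new Node entries under MachineConfig.Nodes. A self-contained sketch of the resulting config.json shape, using trimmed stand-in types and made-up values:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Trimmed stand-ins for the new config types; field set reduced for illustration.
    type Node struct {
        Name              string
        IP                string
        Port              int
        KubernetesVersion string
        ControlPlane      bool
        Worker            bool
    }

    type MachineConfig struct {
        VMDriver string
        Nodes    []Node
    }

    func main() {
        cc := MachineConfig{
            VMDriver: "docker",
            Nodes: []Node{
                {Name: "m01", IP: "192.168.64.2", Port: 8443, KubernetesVersion: "v1.17.0", ControlPlane: true, Worker: true},
            },
        }
        data, _ := json.MarshalIndent(cc, "", "  ")
        fmt.Println(string(data)) // approximates the new profiles/<name>/config.json layout
    }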
@@ -1,7 +1,7 @@
apiVersion: v1
clusters: []
contexts: []
clusters: null
contexts: null
current-context: minikube
kind: Config
preferences: {}
users: []
users: null

@@ -71,7 +71,7 @@ func LoadImages(cc *config.MachineConfig, runner command.Runner, images []string
    }()

    var g errgroup.Group
    cr, err := cruntime.New(cruntime.Config{Type: cc.ContainerRuntime, Runner: runner})
    cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner})
    if err != nil {
        return errors.Wrap(err, "runtime")
    }

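From here on the change is mechanical: ContainerRuntime left the top-level MachineConfig (see the types.go hunk above), so call sites switch from cc.ContainerRuntime to cc.KubernetesConfig.ContainerRuntime; the none driver and buildroot provisioner hunks below repeat the same substitution. A trivial stand-in showing the new field path (types trimmed, value made up):

    package main

    import "fmt"

    // Trimmed stand-ins for config.MachineConfig / config.KubernetesConfig.
    type KubernetesConfig struct{ ContainerRuntime string }
    type MachineConfig struct{ KubernetesConfig KubernetesConfig }

    func main() {
        cc := MachineConfig{KubernetesConfig: KubernetesConfig{ContainerRuntime: "containerd"}}
        fmt.Println(cc.KubernetesConfig.ContainerRuntime) // "containerd"
    }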
@@ -48,7 +48,6 @@ func configure(mc config.MachineConfig) interface{} {
        ImageDigest:   kic.BaseImage,
        CPU:           mc.CPUs,
        Memory:        mc.Memory,
        APIServerPort: mc.KubernetesConfig.NodePort,
        OCIBinary:     oci.Docker,
    })

@@ -46,7 +46,7 @@ func configure(mc config.MachineConfig) interface{} {
    return none.NewDriver(none.Config{
        MachineName:      mc.Name,
        StorePath:        localpath.MiniPath(),
        ContainerRuntime: mc.ContainerRuntime,
        ContainerRuntime: mc.KubernetesConfig.ContainerRuntime,
    })
}

@@ -262,7 +262,7 @@ func setContainerRuntimeOptions(p *BuildrootProvisioner) error {
        return errors.Wrap(err, "getting cluster config")
    }

    switch c.ContainerRuntime {
    switch c.KubernetesConfig.ContainerRuntime {
    case "crio", "cri-o":
        return p.setCrioOptions()
    case "containerd":
@@ -111,7 +111,7 @@ func TestDownloadOnly(t *testing.T) {
    got := ""
    for _, p := range ps["valid"] {
        if p.Name == profile {
            got = p.Config[0].VMDriver
            got = p.Config.VMDriver
        }
    }
