Merge conflict

pull/7554/head
Thomas Stromberg 2020-04-09 17:34:30 -07:00
commit 0b4e5e3655
76 changed files with 1497 additions and 538 deletions

@ -111,7 +111,7 @@ MINIKUBE_TEST_FILES := ./cmd/... ./pkg/...
MARKDOWNLINT ?= markdownlint
MINIKUBE_MARKDOWN_FILES := README.md docs CONTRIBUTING.md CHANGELOG.md
MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md
MINIKUBE_BUILD_TAGS := container_image_ostree_stub containers_image_openpgp
MINIKUBE_BUILD_TAGS += go_getter_nos3 go_getter_nogcs

@ -16,7 +16,8 @@
minikube implements a local Kubernetes cluster on macOS, Linux, and Windows. minikube's [primary goals](https://minikube.sigs.k8s.io/docs/concepts/principles/) are to be the best tool for local Kubernetes application development and to support all Kubernetes features that fit.
<img src="https://github.com/kubernetes/minikube/raw/master/site/content/en/start.png" width="738" alt="screenshot">
<img src="https://raw.githubusercontent.com/kubernetes/minikube/master/site/static/images/screenshot.png" width="738" alt="screenshot">
## Features

@ -29,6 +29,7 @@ import (
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
)
@ -49,11 +50,12 @@ var addonsListCmd = &cobra.Command{
exit.UsageT("usage: minikube addons list")
}
_, cc := mustload.Partial(ClusterFlagValue())
switch strings.ToLower(addonListOutput) {
case "list":
printAddonsList()
printAddonsList(cc)
case "json":
printAddonsJSON()
printAddonsJSON(cc)
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'list', 'json'", addonListOutput))
}
@ -85,27 +87,24 @@ var stringFromStatus = func(addonStatus bool) string {
return "disabled"
}
var printAddonsList = func() {
var printAddonsList = func(cc *config.ClusterConfig) {
addonNames := make([]string, 0, len(assets.Addons))
for addonName := range assets.Addons {
addonNames = append(addonNames, addonName)
}
sort.Strings(addonNames)
var tData [][]string
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Addon Name", "Profile", "Status"})
table.SetAutoFormatHeaders(true)
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
table.SetCenterSeparator("|")
pName := ClusterFlagValue()
for _, addonName := range addonNames {
addonBundle := assets.Addons[addonName]
addonStatus, err := addonBundle.IsEnabled(pName)
if err != nil {
out.WarningT("Unable to get addon status for {{.name}}: {{.error}}", out.V{"name": addonName, "error": err})
}
tData = append(tData, []string{addonName, pName, fmt.Sprintf("%s %s", stringFromStatus(addonStatus), iconFromStatus(addonStatus))})
enabled := addonBundle.IsEnabled(cc)
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled))})
}
table.AppendBulk(tData)
@ -120,9 +119,8 @@ var printAddonsList = func() {
}
}
var printAddonsJSON = func() {
var printAddonsJSON = func(cc *config.ClusterConfig) {
addonNames := make([]string, 0, len(assets.Addons))
pName := ClusterFlagValue()
for addonName := range assets.Addons {
addonNames = append(addonNames, addonName)
}
@ -132,16 +130,11 @@ var printAddonsJSON = func() {
for _, addonName := range addonNames {
addonBundle := assets.Addons[addonName]
addonStatus, err := addonBundle.IsEnabled(pName)
if err != nil {
glog.Errorf("Unable to get addon status for %s: %v", addonName, err)
continue
}
enabled := addonBundle.IsEnabled(cc)
addonsMap[addonName] = map[string]interface{}{
"Status": stringFromStatus(addonStatus),
"Profile": pName,
"Status": stringFromStatus(enabled),
"Profile": cc.Name,
}
}
jsonString, _ := json.Marshal(addonsMap)

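Both printers now take the already-loaded cluster config, so the command pays for only one config read. A minimal sketch of the new caller contract, reusing identifiers from the hunks above (the surrounding cobra wiring is assumed):

    // List addon states for one profile using the new signatures.
    _, cc := mustload.Partial(ClusterFlagValue()) // loads the profile once
    for name, addon := range assets.Addons {
        // IsEnabled no longer returns an error: it reads cc.Addons and
        // falls back to the addon's compiled-in default.
        fmt.Printf("%s\t%s\t%v\n", name, cc.Name, addon.IsEnabled(cc))
    }
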
@ -33,7 +33,7 @@ var addonsDisableCmd = &cobra.Command{
}
addon := args[0]
err := addons.Set(addon, "false", ClusterFlagValue())
err := addons.SetAndSave(ClusterFlagValue(), addon, "false")
if err != nil {
exit.WithError("disable failed", err)
}

@ -32,7 +32,7 @@ var addonsEnableCmd = &cobra.Command{
exit.UsageT("usage: minikube addons enable ADDON_NAME")
}
addon := args[0]
err := addons.Set(addon, "true", ClusterFlagValue())
err := addons.SetAndSave(ClusterFlagValue(), addon, "true")
if err != nil {
exit.WithError("enable failed", err)
}

@ -66,11 +66,9 @@ var addonsOpenCmd = &cobra.Command{
To see the list of available addons run:
minikube addons list`, out.V{"name": addonName})
}
ok, err := addon.IsEnabled(cname)
if err != nil {
exit.WithError("IsEnabled failed", err)
}
if !ok {
enabled := addon.IsEnabled(co.Config)
if !enabled {
exit.WithCodeT(exit.Unavailable, `addon '{{.name}}' is currently not enabled.
To enable this addon run:
minikube addons enable {{.name}}`, out.V{"name": addonName})

@ -67,13 +67,14 @@ var dashboardCmd = &cobra.Command{
var err error
// Check dashboard status before enabling it
dashboardAddon := assets.Addons["dashboard"]
dashboardStatus, _ := dashboardAddon.IsEnabled(cname)
if !dashboardStatus {
addon := assets.Addons["dashboard"]
enabled := addon.IsEnabled(co.Config)
if !enabled {
// Send status messages to stderr for folks re-using this output.
out.ErrT(out.Enabling, "Enabling dashboard ...")
// Enable the dashboard add-on
err = addons.Set("dashboard", "true", cname)
err = addons.SetAndSave(cname, "dashboard", "true")
if err != nil {
exit.WithError("Unable to enable dashboard", err)
}

@ -208,7 +208,13 @@ func deleteProfileContainersAndVolumes(name string) {
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
deleteProfileContainersAndVolumes(profile.Name)
if profile.Config != nil {
// if driver is oci driver, delete containers and volumes
if driver.IsKIC(profile.Config.Driver) {
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
deleteProfileContainersAndVolumes(profile.Name)
}
}
api, err := machine.NewAPIClient()
if err != nil {

@ -20,6 +20,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
@ -54,7 +55,10 @@ var nodeAddCmd = &cobra.Command{
}
if err := node.Add(cc, n); err != nil {
maybeDeleteAndRetry(*cc, n, nil, err)
_, err := maybeDeleteAndRetry(*cc, n, nil, err)
if err != nil {
exit.WithError("failed to add node", err)
}
}
out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})

@ -49,9 +49,27 @@ var nodeStartCmd = &cobra.Command{
exit.WithError("retrieving node", err)
}
_, err = node.Start(*cc, *n, nil, false)
r, p, m, h, err := node.Provision(cc, n, false)
if err != nil {
maybeDeleteAndRetry(*cc, *n, nil, err)
exit.WithError("provisioning host for node", err)
}
s := node.Starter{
Runner: r,
PreExists: p,
MachineAPI: m,
Host: h,
Cfg: cc,
Node: n,
ExistingAddons: nil,
}
_, err = node.Start(s, false)
if err != nil {
_, err := maybeDeleteAndRetry(*cc, *n, nil, err)
if err != nil {
exit.WithError("failed to start node", err)
}
}
},
}

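Provisioning and startup are now separate phases, and maybeDeleteAndRetry reports its result instead of exiting internally. A condensed sketch of the new retry contract (startErr stands in for the error returned by node.Start above):

    // maybeDeleteAndRetry now returns (kubeconfig, error), so the caller
    // decides whether a second failure is fatal.
    kc, err := maybeDeleteAndRetry(*cc, *n, nil, startErr)
    if err != nil {
        exit.WithError("failed to start node", err)
    }
    _ = kc // nil unless a control-plane node was restarted
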
@ -150,11 +150,59 @@ func runStart(cmd *cobra.Command, args []string) {
}
validateSpecifiedDriver(existing)
ds := selectDriver(existing)
ds, alts, specified := selectDriver(existing)
starter, err := provisionWithDriver(cmd, ds, existing)
if err != nil {
if specified {
// If the user specified a driver, don't fall back to anything else
exit.WithError("error provisioning host", err)
} else {
success := false
// Walk down the rest of the options
for _, alt := range alts {
out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
ds = alt
// Delete the existing cluster and try again with the next driver on the list
profile, err := config.LoadProfile(ClusterFlagValue())
if err != nil {
glog.Warningf("%s profile does not exist, trying anyway.", ClusterFlagValue())
}
err = deleteProfile(profile)
if err != nil {
out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
}
starter, err = provisionWithDriver(cmd, ds, existing)
if err != nil {
continue
} else {
// Success!
success = true
break
}
}
if !success {
exit.WithError("error provisioning host", err)
}
}
}
kubeconfig, err := startWithDriver(starter, existing)
if err != nil {
exit.WithError("failed to start node", err)
}
if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
glog.Errorf("kubectl info: %v", err)
}
}
func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
driverName := ds.Name
glog.Infof("selected driver: %s", driverName)
validateDriver(ds, existing)
err = autoSetDriverOptions(cmd, driverName)
err := autoSetDriverOptions(cmd, driverName)
if err != nil {
glog.Errorf("Error autoSetOptions : %v", err)
}
@ -170,19 +218,19 @@ func runStart(cmd *cobra.Command, args []string) {
k8sVersion := getKubernetesVersion(existing)
cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
if err != nil {
exit.WithError("Failed to generate config", err)
return node.Starter{}, errors.Wrap(err, "Failed to generate config")
}
// This is about as far as we can go without overwriting config files
if viper.GetBool(dryRun) {
out.T(out.DryRun, `dry-run validation complete!`)
return
os.Exit(0)
}
if driver.IsVM(driverName) {
url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
if err != nil {
exit.WithError("Failed to cache ISO", err)
return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
}
cc.MinikubeISO = url
}
@ -201,9 +249,29 @@ func runStart(cmd *cobra.Command, args []string) {
}
}
kubeconfig, err := node.Start(cc, n, existingAddons, true)
mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true)
if err != nil {
kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
return node.Starter{}, err
}
return node.Starter{
Runner: mRunner,
PreExists: preExists,
MachineAPI: mAPI,
Host: host,
ExistingAddons: existingAddons,
Cfg: &cc,
Node: &n,
}, nil
}
func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
kubeconfig, err := node.Start(starter, true)
if err != nil {
kubeconfig, err = maybeDeleteAndRetry(*starter.Cfg, *starter.Node, starter.ExistingAddons, err)
if err != nil {
return nil, err
}
}
numNodes := viper.GetInt(nodes)
@ -211,7 +279,7 @@ func runStart(cmd *cobra.Command, args []string) {
numNodes = len(existing.Nodes)
}
if numNodes > 1 {
if driver.BareMetal(driverName) {
if driver.BareMetal(starter.Cfg.Driver) {
exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.")
} else {
for i := 1; i < numNodes; i++ {
@ -220,20 +288,18 @@ func runStart(cmd *cobra.Command, args []string) {
Name: nodeName,
Worker: true,
ControlPlane: false,
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
}
out.Ln("") // extra newline for clarity on the command line
err := node.Add(&cc, n)
err := node.Add(starter.Cfg, n)
if err != nil {
exit.WithError("adding node", err)
return nil, errors.Wrap(err, "adding node")
}
}
}
}
if err := showKubectlInfo(kubeconfig, cc.KubernetesConfig.KubernetesVersion, cc.Name); err != nil {
glog.Errorf("kubectl info: %v", err)
}
return kubeconfig, nil
}
func updateDriver(driverName string) {
@ -303,7 +369,7 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
return nil
}
func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
if viper.GetBool(deleteOnFailure) {
out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
// Start failed, delete the cluster and try again
@ -318,21 +384,35 @@ func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons
}
var kubeconfig *kubeconfig.Settings
for _, v := range cc.Nodes {
k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
if v.ControlPlane {
for _, n := range cc.Nodes {
r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane)
s := node.Starter{
Runner: r,
PreExists: p,
MachineAPI: m,
Host: h,
Cfg: &cc,
Node: &n,
ExistingAddons: existingAddons,
}
if err != nil {
// Ok we failed again, let's bail
return nil, err
}
k, err := node.Start(s, n.ControlPlane)
if n.ControlPlane {
kubeconfig = k
}
if err != nil {
// Ok we failed again, let's bail
exit.WithError("Start failed after cluster deletion", err)
return nil, err
}
}
return kubeconfig
return kubeconfig, nil
}
// Don't delete the cluster unless they ask
exit.WithError("startup failed", originalErr)
return nil
return nil, errors.Wrap(originalErr, "startup failed")
}
func kubectlVersion(path string) (string, error) {
@ -360,7 +440,7 @@ func kubectlVersion(path string) (string, error) {
return cv.ClientVersion.GitVersion, nil
}
func selectDriver(existing *config.ClusterConfig) registry.DriverState {
func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
// Technically unrelated, but important to perform before detection
driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
@ -369,7 +449,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
old := hostDriver(existing)
ds := driver.Status(old)
out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
return ds
return ds, nil, true
}
// Default to looking at the new driver parameter
@ -389,7 +469,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
}
out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds
return ds, nil, true
}
// Fallback to old driver parameter
@ -399,7 +479,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
}
out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
return ds
return ds, nil, true
}
choices := driver.Choices(viper.GetBool("vm"))
@ -422,7 +502,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
} else {
out.T(out.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
}
return pick
return pick, alts, false
}
// hostDriver returns the actual driver used by a libmachine host, which can differ from our config

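selectDriver's new three-value return drives the fallback loop in runStart above: ds is the first driver to try, alts holds the remaining healthy candidates (nil whenever the driver was pinned by a profile or flag), and specified disables fallback entirely. A condensed restatement, omitting the profile cleanup between attempts shown earlier:

    ds, alts, specified := selectDriver(existing)
    starter, err := provisionWithDriver(cmd, ds, existing)
    if err != nil && !specified {
        for _, alt := range alts { // walk the remaining healthy drivers
            if starter, err = provisionWithDriver(cmd, alt, existing); err == nil {
                break
            }
        }
    }
    // starter then flows into startWithDriver(starter, existing), as above
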
@ -1,49 +0,0 @@
# Advanced Topics and Tutorials
## Cluster Configuration
* **Alternative Runtimes** ([alternative_runtimes.md](alternative_runtimes.md)): How to run minikube without Docker as the container runtime
* **Environment Variables** ([env_vars.md](env_vars.md)): The different environment variables that minikube understands
* **Minikube Addons** ([addons.md](addons.md)): Information on configuring addons to be run on minikube
* **Configuring Kubernetes** ([configuring_kubernetes.md](configuring_kubernetes.md)): Configuring different Kubernetes components in minikube
* **Caching Images** ([cache.md](cache.md)): Caching non-minikube images in minikube
* **GPUs** ([gpu.md](gpu.md)): Using NVIDIA GPUs on minikube
* **OpenID Connect Authentication** ([openid_connect_auth.md](openid_connect_auth.md)): Using OIDC Authentication on minikube
### Installation and debugging
* **Driver installation** ([drivers.md](drivers.md)): In depth instructions for installing the various hypervisor drivers
* **Debugging minikube** ([debugging.md](debugging.md)): General practices for debugging the minikube binary itself
### Developing on the minikube cluster
* **Reusing the Docker Daemon** ([reusing_the_docker_daemon.md](reusing_the_docker_daemon.md)): How to point your docker CLI to the docker daemon running inside minikube
* **Building images within the VM** ([building_images_within_the_vm.md](building_images_within_the_vm.md)): How to build a container image within the minikube VM
#### Storage
* **Persistent Volumes** ([persistent_volumes.md](persistent_volumes.md)): Persistent Volumes in Minikube and persisted locations in the VM
* **Host Folder Mounting** ([host_folder_mount.md](host_folder_mount.md)): How to mount your files from your host into the minikube VM
* **Syncing files into the VM** ([syncing-files.md](syncing-files.md)): How to sync files from your host into the minikube VM
#### Networking
* **HTTP Proxy** ([http_proxy.md](http_proxy.md)): Instruction on how to run minikube behind a HTTP Proxy
* **Insecure or Private Registries** ([insecure_registry.md](insecure_registry.md)): How to use private or insecure registries with minikube
* **Accessing etcd from inside the cluster** ([accessing_etcd.md](accessing_etcd.md))
* **Networking** ([networking.md](networking.md)): FAQ about networking between the host and minikube VM
* **Offline** ([offline.md](offline.md)): Details about using minikube offline

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/accessing-host-resources/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/addons/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/runtimes/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/building_within/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/caching

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/commands/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/addons/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/drivers/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/iso/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/concepts/principles/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/releasing/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/roadmap/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/dashboard/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/debug/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/drivers/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/environment_variables

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/mount/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/proxy/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/registry/

@ -1,2 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/disk_cache/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/openid_connect_auth/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/persistent_volumes/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/docker_daemon/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/sync/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/loadbalancer/

@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/drivers/none/

@ -23,6 +23,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
@ -41,32 +42,49 @@ import (
// defaultStorageClassProvisioner is the name of the default storage class provisioner
const defaultStorageClassProvisioner = "standard"
// Set sets a value
func Set(name, value, profile string) error {
glog.Infof("Setting %s=%s in profile %q", name, value, profile)
// RunCallbacks runs all actions associated with an addon, but does not set it (thread-safe)
func RunCallbacks(cc *config.ClusterConfig, name string, value string) error {
glog.Infof("Setting %s=%s in profile %q", name, value, cc.Name)
a, valid := isAddonValid(name)
if !valid {
return errors.Errorf("%s is not a valid addon", name)
}
cc, err := config.Load(profile)
if err != nil {
return errors.Wrap(err, "loading profile")
}
// Run any additional validations for this property
if err := run(cc, name, value, a.validations); err != nil {
return errors.Wrap(err, "running validations")
}
if err := a.set(cc, name, value); err != nil {
return errors.Wrap(err, "setting new value of addon")
}
// Run any callbacks for this property
if err := run(cc, name, value, a.callbacks); err != nil {
return errors.Wrap(err, "running callbacks")
}
return nil
}
// Set sets a value in the config (not threadsafe)
func Set(cc *config.ClusterConfig, name string, value string) error {
a, valid := isAddonValid(name)
if !valid {
return errors.Errorf("%s is not a valid addon", name)
}
return a.set(cc, name, value)
}
// SetAndSave sets a value and saves the config
func SetAndSave(profile string, name string, value string) error {
cc, err := config.Load(profile)
if err != nil {
return errors.Wrap(err, "loading profile")
}
if err := RunCallbacks(cc, name, value); err != nil {
return errors.Wrap(err, "run callbacks")
}
if err := Set(cc, name, value); err != nil {
return errors.Wrap(err, "set")
}
glog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
return config.Write(profile, cc)
@ -87,7 +105,7 @@ func run(cc *config.ClusterConfig, name string, value string, fns []setFn) error
return nil
}
// SetBool sets a bool value
// SetBool sets a bool value in the config (not threadsafe)
func SetBool(cc *config.ClusterConfig, name string, val string) error {
b, err := strconv.ParseBool(val)
if err != nil {
@ -110,13 +128,7 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err
addon := assets.Addons[name]
// check addon status before enabling/disabling it
alreadySet, err := isAddonAlreadySet(addon, enable, cc.Name)
if err != nil {
out.ErrT(out.Conflict, "{{.error}}", out.V{"error": err})
return err
}
if alreadySet {
if isAddonAlreadySet(cc, addon, enable) {
glog.Warningf("addon %s should already be in state %v", name, val)
if !enable {
return nil
@ -160,7 +172,7 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
mName := driver.MachineName(*cc, cp)
host, err := machine.LoadHost(api, mName)
if err != nil || !machine.IsRunning(api, mName) {
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
glog.Warningf("%q is not running, setting %s=%v and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
return nil
}
@ -173,19 +185,17 @@ https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Dri
return enableOrDisableAddonInternal(cc, addon, cmd, data, enable)
}
func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {
addonStatus, err := addon.IsEnabled(profile)
if err != nil {
return false, errors.Wrap(err, "is enabled")
func isAddonAlreadySet(cc *config.ClusterConfig, addon *assets.Addon, enable bool) bool {
enabled := addon.IsEnabled(cc)
if enabled && enable {
return true
}
if addonStatus && enable {
return true, nil
} else if !addonStatus && !enable {
return true, nil
if !enabled && !enable {
return true
}
return false, nil
return false
}
func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {
@ -197,7 +207,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
if addon.IsTemplate() {
f, err = addon.Evaluate(data)
if err != nil {
return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetAssetName())
return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetSourcePath())
}
} else {
@ -287,7 +297,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
}
// Start enables the default addons for a profile, plus any additional
func Start(profile string, toEnable map[string]bool, additional []string) {
func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]bool, additional []string) {
wg.Add(1)
defer wg.Done()
start := time.Now()
glog.Infof("enableAddons start: toEnable=%v, additional=%s", toEnable, additional)
defer func() {
@ -296,11 +309,7 @@ func Start(profile string, toEnable map[string]bool, additional []string) {
// Get the default values of any addons not saved to our config
for name, a := range assets.Addons {
defaultVal, err := a.IsEnabled(profile)
if err != nil {
glog.Errorf("is-enabled failed for %q: %v", a.Name(), err)
continue
}
defaultVal := a.IsEnabled(cc)
_, exists := toEnable[name]
if !exists {
@ -321,12 +330,25 @@ func Start(profile string, toEnable map[string]bool, additional []string) {
}
sort.Strings(toEnableList)
var awg sync.WaitGroup
out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")})
for _, a := range toEnableList {
err := Set(a, "true", profile)
if err != nil {
// Intentionally non-fatal
out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": a, "error": err})
awg.Add(1)
go func(name string) {
err := RunCallbacks(cc, name, "true")
if err != nil {
out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": name, "error": err})
}
awg.Done()
}(a)
}
// Wait until all of the addons are enabled before updating the config (not thread safe)
awg.Wait()
for _, a := range toEnableList {
if err := Set(cc, a, "true"); err != nil {
glog.Errorf("store failed: %v", err)
}
}
}

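Enabling is now two-phase: the deployment work in RunCallbacks fans out across goroutines, and only after awg.Wait() does the serial Set loop mutate the shared ClusterConfig. The same fan-out/fan-in shape in miniature (a sketch of the function above, not a replacement for it):

    var awg sync.WaitGroup
    for _, name := range toEnableList {
        awg.Add(1)
        go func(name string) {
            defer awg.Done()
            // Thread-safe: deploys the addon but never writes config.
            if err := RunCallbacks(cc, name, "true"); err != nil {
                out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": name, "error": err})
            }
        }(name)
    }
    awg.Wait() // every addon deployed (or failed) before any config write
    for _, name := range toEnableList {
        if err := Set(cc, name, "true"); err != nil { // not thread-safe: kept serial
            glog.Errorf("store failed: %v", err)
        }
    }
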
@ -20,6 +20,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"k8s.io/minikube/pkg/minikube/assets"
@ -59,47 +60,42 @@ func createTestProfile(t *testing.T) string {
}
func TestIsAddonAlreadySet(t *testing.T) {
profile := createTestProfile(t)
if err := Set("registry", "true", profile); err != nil {
cc := &config.ClusterConfig{Name: "test"}
if err := Set(cc, "registry", "true"); err != nil {
t.Errorf("unable to set registry true: %v", err)
}
enabled, err := assets.Addons["registry"].IsEnabled(profile)
if err != nil {
t.Errorf("registry: %v", err)
}
if !enabled {
if !assets.Addons["registry"].IsEnabled(cc) {
t.Errorf("expected registry to be enabled")
}
enabled, err = assets.Addons["ingress"].IsEnabled(profile)
if err != nil {
t.Errorf("ingress: %v", err)
}
if enabled {
if assets.Addons["ingress"].IsEnabled(cc) {
t.Errorf("expected ingress to not be enabled")
}
}
func TestDisableUnknownAddon(t *testing.T) {
profile := createTestProfile(t)
if err := Set("InvalidAddon", "false", profile); err == nil {
cc := &config.ClusterConfig{Name: "test"}
if err := Set(cc, "InvalidAddon", "false"); err == nil {
t.Fatalf("Disable did not return error for unknown addon")
}
}
func TestEnableUnknownAddon(t *testing.T) {
profile := createTestProfile(t)
if err := Set("InvalidAddon", "true", profile); err == nil {
cc := &config.ClusterConfig{Name: "test"}
if err := Set(cc, "InvalidAddon", "true"); err == nil {
t.Fatalf("Enable did not return error for unknown addon")
}
}
func TestEnableAndDisableAddon(t *testing.T) {
func TestSetAndSave(t *testing.T) {
profile := createTestProfile(t)
// enable
if err := Set("dashboard", "true", profile); err != nil {
if err := SetAndSave(profile, "dashboard", "true"); err != nil {
t.Errorf("Enable returned unexpected error: " + err.Error())
}
@ -112,7 +108,7 @@ func TestEnableAndDisableAddon(t *testing.T) {
}
// disable
if err := Set("dashboard", "false", profile); err != nil {
if err := SetAndSave(profile, "dashboard", "false"); err != nil {
t.Errorf("Disable returned unexpected error: " + err.Error())
}
@ -126,14 +122,18 @@ func TestEnableAndDisableAddon(t *testing.T) {
}
func TestStart(t *testing.T) {
profile := createTestProfile(t)
Start(profile, map[string]bool{}, []string{"dashboard"})
enabled, err := assets.Addons["dashboard"].IsEnabled(profile)
if err != nil {
t.Errorf("dashboard: %v", err)
cc := &config.ClusterConfig{
Name: "start",
CPUs: 2,
Memory: 2500,
KubernetesConfig: config.KubernetesConfig{},
}
if !enabled {
var wg sync.WaitGroup
Start(&wg, cc, map[string]bool{}, []string{"dashboard"})
wg.Wait()
if !assets.Addons["dashboard"].IsEnabled(cc) {
t.Errorf("expected dashboard to be enabled")
}
}

@ -84,8 +84,8 @@ func DeleteContainer(ociBin string, name string) error {
return nil
}
// PrepareContainerNode sets up the container node befpre CreateContainerNode is caleld
// for the docker runtime, it creates a docker volume which will be mounted into kic
// PrepareContainerNode sets up the container node before CreateContainerNode is called.
// For the docker runtime, it creates a docker volume which will be mounted into kic
func PrepareContainerNode(p CreateParams) error {
if p.OCIBinary != Docker {
return nil

@ -181,7 +181,7 @@ func copyAssetToDest(targetName, dest string) error {
log.Printf("%s asset path: %s", targetName, src)
contents, err := ioutil.ReadFile(src)
if err != nil {
return errors.Wrapf(err, "getting contents of %s", asset.GetAssetName())
return errors.Wrapf(err, "getting contents of %s", asset.GetSourcePath())
}
if _, err := os.Stat(dest); err == nil {
if err := os.Remove(dest); err != nil {

@ -19,8 +19,6 @@ package assets
import (
"runtime"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/vmpath"
@ -49,21 +47,14 @@ func (a *Addon) Name() string {
}
// IsEnabled checks if an Addon is enabled for the given profile
func (a *Addon) IsEnabled(profile string) (bool, error) {
c, err := config.Load(profile)
if err != nil {
return false, errors.Wrap(err, "load")
}
// Is this addon explicitly listed in their configuration?
status, ok := c.Addons[a.Name()]
glog.V(1).Infof("IsEnabled %q = %v (listed in config=%v)", a.Name(), status, ok)
func (a *Addon) IsEnabled(cc *config.ClusterConfig) bool {
status, ok := cc.Addons[a.Name()]
if ok {
return status, nil
return status
}
// Return the default unconfigured state of the addon
return a.enabled, nil
return a.enabled
}
// Addons is the list of addons

@ -29,11 +29,15 @@ import (
"github.com/pkg/errors"
)
// MemorySource is the source name used for in-memory copies
const MemorySource = "memory"
// CopyableFile is something that can be copied
type CopyableFile interface {
io.Reader
GetLength() int
GetAssetName() string
GetSourcePath() string
GetTargetDir() string
GetTargetName() string
GetPermissions() string
@ -43,15 +47,16 @@ type CopyableFile interface {
// BaseAsset is the base asset class
type BaseAsset struct {
AssetName string
SourcePath string
TargetDir string
TargetName string
Permissions string
Source string
}
// GetAssetName returns asset name
func (b *BaseAsset) GetAssetName() string {
return b.AssetName
// GetSourcePath returns the source path of the asset
func (b *BaseAsset) GetSourcePath() string {
return b.SourcePath
}
// GetTargetDir returns target dir
@ -99,7 +104,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
r := io.NewSectionReader(f, 0, info.Size())
return &FileAsset{
BaseAsset: BaseAsset{
AssetName: src,
SourcePath: src,
TargetDir: targetDir,
TargetName: targetName,
Permissions: permissions,
@ -110,7 +115,7 @@ func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, e
// GetLength returns the file length, or 0 (on error)
func (f *FileAsset) GetLength() (flen int) {
fi, err := os.Stat(f.AssetName)
fi, err := os.Stat(f.SourcePath)
if err != nil {
return 0
}
@ -119,7 +124,7 @@ func (f *FileAsset) GetLength() (flen int) {
// GetModTime returns modification time of the file
func (f *FileAsset) GetModTime() (time.Time, error) {
fi, err := os.Stat(f.AssetName)
fi, err := os.Stat(f.SourcePath)
if err != nil {
return time.Time{}, err
}
@ -168,6 +173,7 @@ func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *Memory
TargetDir: targetDir,
TargetName: targetName,
Permissions: permissions,
SourcePath: MemorySource,
},
reader: bytes.NewReader(d),
length: len(d),
@ -195,7 +201,7 @@ func MustBinAsset(name, targetDir, targetName, permissions string, isTemplate bo
func NewBinAsset(name, targetDir, targetName, permissions string, isTemplate bool) (*BinAsset, error) {
m := &BinAsset{
BaseAsset: BaseAsset{
AssetName: name,
SourcePath: name,
TargetDir: targetDir,
TargetName: targetName,
Permissions: permissions,
@ -218,13 +224,13 @@ func defaultValue(defValue string, val interface{}) string {
}
func (m *BinAsset) loadData(isTemplate bool) error {
contents, err := Asset(m.AssetName)
contents, err := Asset(m.SourcePath)
if err != nil {
return err
}
if isTemplate {
tpl, err := template.New(m.AssetName).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents))
tpl, err := template.New(m.SourcePath).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents))
if err != nil {
return err
}
@ -234,9 +240,9 @@ func (m *BinAsset) loadData(isTemplate bool) error {
m.length = len(contents)
m.reader = bytes.NewReader(contents)
glog.V(1).Infof("Created asset %s with %d bytes", m.AssetName, m.length)
glog.V(1).Infof("Created asset %s with %d bytes", m.SourcePath, m.length)
if m.length == 0 {
return fmt.Errorf("%s is an empty asset", m.AssetName)
return fmt.Errorf("%s is an empty asset", m.SourcePath)
}
return nil
}
@ -249,7 +255,7 @@ func (m *BinAsset) IsTemplate() bool {
// Evaluate evaluates the template to a new asset
func (m *BinAsset) Evaluate(data interface{}) (*MemoryAsset, error) {
if !m.IsTemplate() {
return nil, errors.Errorf("the asset %s is not a template", m.AssetName)
return nil, errors.Errorf("the asset %s is not a template", m.SourcePath)
}

@ -117,9 +117,8 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node)
}
for _, f := range copyableFiles {
glog.Infof("copying: %s/%s", f.GetTargetDir(), f.GetTargetName())
if err := cmd.Copy(f); err != nil {
return nil, errors.Wrapf(err, "Copy %s", f.GetAssetName())
return nil, errors.Wrapf(err, "Copy %s", f.GetSourcePath())
}
}

@ -17,12 +17,17 @@ limitations under the License.
package command
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
)
@ -55,10 +60,6 @@ type Runner interface {
Remove(assets.CopyableFile) error
}
func getDeleteFileCommand(f assets.CopyableFile) string {
return fmt.Sprintf("sudo rm %s", path.Join(f.GetTargetDir(), f.GetTargetName()))
}
// Command returns a human readable command string that does not induce eye fatigue
func (rr RunResult) Command() string {
var sb strings.Builder
@ -84,3 +85,101 @@ func (rr RunResult) Output() string {
}
return sb.String()
}
// teePrefix copies bytes from a reader to writer, logging each new line.
func teePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanBytes)
var line bytes.Buffer
for scanner.Scan() {
b := scanner.Bytes()
if _, err := w.Write(b); err != nil {
return err
}
if bytes.IndexAny(b, "\r\n") == 0 {
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
line.Reset()
}
continue
}
line.Write(b)
}
// Catch trailing output in case stream does not end with a newline
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
}
return nil
}
// fileExists checks that the same file exists on the other end
func fileExists(r Runner, f assets.CopyableFile, dst string) (bool, error) {
// It's too difficult to tell if the file exists with the exact contents
if f.GetSourcePath() == assets.MemorySource {
return false, nil
}
// get file size and modtime of the source
srcSize := f.GetLength()
srcModTime, err := f.GetModTime()
if err != nil {
return false, err
}
if srcModTime.IsZero() {
return false, nil
}
// get file size and modtime of the destination
rr, err := r.RunCmd(exec.Command("stat", "-c", "%s %y", dst))
if err != nil {
if rr.ExitCode == 1 {
return false, nil
}
// avoid the noise because ssh doesn't propagate the exit code
if strings.HasSuffix(err.Error(), "status 1") {
return false, nil
}
return false, err
}
stdout := strings.TrimSpace(rr.Stdout.String())
outputs := strings.SplitN(stdout, " ", 2)
dstSize, err := strconv.Atoi(outputs[0])
if err != nil {
return false, err
}
dstModTime, err := time.Parse(layout, outputs[1])
if err != nil {
return false, err
}
if srcSize != dstSize {
return false, errors.New("source file and destination file are different sizes")
}
return srcModTime.Equal(dstModTime), nil
}
// writeFile is like ioutil.WriteFile, but does not require reading the file into memory
func writeFile(dst string, f assets.CopyableFile, perms os.FileMode) error {
w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perms)
if err != nil {
return errors.Wrap(err, "create")
}
defer w.Close()
r := f.(io.Reader)
n, err := io.Copy(w, r)
if err != nil {
return errors.Wrap(err, "copy")
}
if n != int64(f.GetLength()) {
return fmt.Errorf("%s: expected to write %d bytes, but wrote %d instead", dst, f.GetLength(), n)
}
return w.Close()
}

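fileExists never compares contents: it matches the source's length and mtime against `stat -c "%s %y" dst`, whose output looks like `1497 2020-04-09 17:34:30.000000000 -0700`. The layout constant passed to time.Parse is defined elsewhere in this package; a standalone sketch of the parsing step, with a hypothetical layout matching that stat format:

    // Hypothetical layout; the real constant lives elsewhere in package command.
    const layout = "2006-01-02 15:04:05.000000000 -0700"

    func parseStat(stdout string) (size int, mtime time.Time, err error) {
        fields := strings.SplitN(strings.TrimSpace(stdout), " ", 2)
        if len(fields) != 2 {
            return 0, time.Time{}, fmt.Errorf("unexpected stat output %q", stdout)
        }
        if size, err = strconv.Atoi(fields[0]); err != nil {
            return 0, time.Time{}, err
        }
        mtime, err = time.Parse(layout, fields[1])
        return size, mtime, err
    }
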
@ -86,35 +86,31 @@ func (*execRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
// Copy copies a file and its permissions
func (*execRunner) Copy(f assets.CopyableFile) error {
targetPath := path.Join(f.GetTargetDir(), f.GetTargetName())
if _, err := os.Stat(targetPath); err == nil {
if err := os.Remove(targetPath); err != nil {
return errors.Wrapf(err, "error removing file %s", targetPath)
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
if _, err := os.Stat(dst); err == nil {
glog.Infof("found %s, removing ...", dst)
if err := os.Remove(dst); err != nil {
return errors.Wrapf(err, "error removing file %s", dst)
}
}
src := f.GetSourcePath()
glog.Infof("cp: %s --> %s (%d bytes)", src, dst, f.GetLength())
if f.GetLength() == 0 {
glog.Warningf("0 byte asset: %+v", f)
}
target, err := os.Create(targetPath)
if err != nil {
return errors.Wrapf(err, "error creating file at %s", targetPath)
}
perms, err := strconv.ParseInt(f.GetPermissions(), 8, 0)
if err != nil {
return errors.Wrapf(err, "error converting permissions %s to integer", f.GetPermissions())
}
if err := os.Chmod(targetPath, os.FileMode(perms)); err != nil {
return errors.Wrapf(err, "error changing file permissions for %s", targetPath)
}
if _, err = io.Copy(target, f); err != nil {
return errors.Wrapf(err, `error copying file %s to target location:
do you have the correct permissions?`,
targetPath)
}
return target.Close()
return writeFile(dst, f, os.FileMode(perms))
}
// Remove removes a file
func (*execRunner) Remove(f assets.CopyableFile) error {
targetPath := filepath.Join(f.GetTargetDir(), f.GetTargetName())
return os.Remove(targetPath)
dst := filepath.Join(f.GetTargetDir(), f.GetTargetName())
glog.Infof("rm: %s", dst)
return os.Remove(dst)
}

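The exec runner now delegates all writing to the shared writeFile helper; the only work left here is turning the asset's octal permission string into an os.FileMode. For example:

    perms, err := strconv.ParseInt("0644", 8, 0) // octal 0644 == decimal 420
    if err != nil {
        // a malformed permission string such as "rw-" lands here
    }
    mode := os.FileMode(perms) // mode.String() == "-rw-r--r--"
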
@ -97,13 +97,13 @@ func (f *FakeCommandRunner) Copy(file assets.CopyableFile) error {
if err != nil {
return errors.Wrapf(err, "error reading file: %+v", file)
}
f.fileMap.Store(file.GetAssetName(), b.String())
f.fileMap.Store(file.GetSourcePath(), b.String())
return nil
}
// Remove removes the filename, file contents key value pair from the stored map
func (f *FakeCommandRunner) Remove(file assets.CopyableFile) error {
f.fileMap.Delete(file.GetAssetName())
f.fileMap.Delete(file.GetSourcePath())
return nil
}

@ -128,44 +128,73 @@ func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
// Copy copies a file and its permissions
func (k *kicRunner) Copy(f assets.CopyableFile) error {
src := f.GetAssetName()
if _, err := os.Stat(f.GetAssetName()); os.IsNotExist(err) {
fc := make([]byte, f.GetLength()) // Read asset file into a []byte
if _, err := f.Read(fc); err != nil {
return errors.Wrap(err, "can't copy non-existing file")
} // we have a MemoryAsset, will write to disk before copying
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
tmpFile, err := ioutil.TempFile(os.TempDir(), "tmpf-memory-asset")
// For tiny files, it's cheaper to overwrite than check
if f.GetLength() > 4096 {
exists, err := fileExists(k, f, dst)
if err != nil {
return errors.Wrap(err, "creating temporary file")
glog.Infof("existence error for %s: %v", dst, err)
}
// clean up the temp file
defer os.Remove(tmpFile.Name())
if _, err = tmpFile.Write(fc); err != nil {
return errors.Wrap(err, "write to temporary file")
if exists {
glog.Infof("copy: skipping %s (exists)", dst)
return nil
}
}
// Close the file
if err := tmpFile.Close(); err != nil {
return errors.Wrap(err, "close temporary file")
}
src = tmpFile.Name()
src := f.GetSourcePath()
if f.GetLength() == 0 {
glog.Warningf("0 byte asset: %+v", f)
}
perms, err := strconv.ParseInt(f.GetPermissions(), 8, 0)
if err != nil {
return errors.Wrapf(err, "converting permissions %s to integer", f.GetPermissions())
return errors.Wrapf(err, "error converting permissions %s to integer", f.GetPermissions())
}
// Rely on cp -a to propagate permissions
if err := os.Chmod(src, os.FileMode(perms)); err != nil {
return errors.Wrapf(err, "chmod")
if src != assets.MemorySource {
// Take the fast path
fi, err := os.Stat(src)
if err == nil {
if fi.Mode() == os.FileMode(perms) {
glog.Infof("%s (direct): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength())
return k.copy(src, dst)
}
// If >1MB, avoid local copy
if fi.Size() > (1024 * 1024) {
glog.Infof("%s (chmod): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength())
if err := k.copy(src, dst); err != nil {
return err
}
return k.chmod(dst, f.GetPermissions())
}
}
}
dest := fmt.Sprintf("%s:%s", k.nameOrID, path.Join(f.GetTargetDir(), f.GetTargetName()))
glog.Infof("%s (temp): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength())
tf, err := ioutil.TempFile("", "tmpf-memory-asset")
if err != nil {
return errors.Wrap(err, "creating temporary file")
}
defer os.Remove(tf.Name())
if err := writeFile(tf.Name(), f, os.FileMode(perms)); err != nil {
return errors.Wrap(err, "write")
}
return k.copy(tf.Name(), dst)
}
func (k *kicRunner) copy(src string, dst string) error {
fullDest := fmt.Sprintf("%s:%s", k.nameOrID, dst)
if k.ociBin == oci.Podman {
return copyToPodman(src, dest)
return copyToPodman(src, fullDest)
}
return copyToDocker(src, dest)
return copyToDocker(src, fullDest)
}
func (k *kicRunner) chmod(dst string, perm string) error {
_, err := k.RunCmd(exec.Command("sudo", "chmod", perm, dst))
return err
}
// Podman's cp command doesn't match docker's, and doesn't support -a
@ -185,11 +214,11 @@ func copyToDocker(src string, dest string) error {
// Remove removes a file
func (k *kicRunner) Remove(f assets.CopyableFile) error {
fp := path.Join(f.GetTargetDir(), f.GetTargetName())
if rr, err := k.RunCmd(exec.Command("sudo", "rm", fp)); err != nil {
return errors.Wrapf(err, "removing file %q output: %s", fp, rr.Output())
}
return nil
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
glog.Infof("rm: %s", dst)
_, err := k.RunCmd(exec.Command("sudo", "rm", dst))
return err
}
// isTerminal returns true if the writer w is a terminal

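The kic Copy rewrite is a tiered strategy: destinations that already match are skipped (checked only above 4 KiB, where the stat round-trip pays for itself); on-disk sources whose mode already matches are copied directly, since cp preserves permissions; on-disk sources over 1 MiB are copied and then chmod'd in-container to avoid a large local duplicate; everything else, including memory assets, goes through a temp file. An illustrative helper (not part of the diff; the names are hypothetical) mirroring that order:

    // copyStrategy condenses the decision order in (k *kicRunner) Copy.
    func copyStrategy(isMemory, destUpToDate, modeMatches bool, length int64) string {
        if length > 4096 && destUpToDate {
            return "skip" // fileExists matched size and mtime
        }
        if !isMemory {
            if modeMatches {
                return "direct" // cp alone yields the right permissions
            }
            if length > 1024*1024 {
                return "copy+chmod" // skip the local temp copy for big files
            }
        }
        return "tempfile" // writeFile to a temp path, then cp
    }
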
@ -17,14 +17,11 @@ limitations under the License.
package command
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"time"
@ -55,13 +52,16 @@ func NewSSHRunner(c *ssh.Client) *SSHRunner {
// Remove runs a command to delete a file on the remote.
func (s *SSHRunner) Remove(f assets.CopyableFile) error {
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
glog.Infof("rm: %s", dst)
sess, err := s.c.NewSession()
if err != nil {
return errors.Wrap(err, "getting ssh session")
}
defer sess.Close()
cmd := getDeleteFileCommand(f)
return sess.Run(cmd)
return sess.Run(fmt.Sprintf("sudo rm %s", dst))
}
// teeSSH runs an SSH command, streaming stdout, stderr to logs
@ -150,14 +150,26 @@ func (s *SSHRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
// Copy copies a file to the remote over SSH.
func (s *SSHRunner) Copy(f assets.CopyableFile) error {
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
exists, err := s.sameFileExists(f, dst)
if err != nil {
glog.Infof("Checked if %s exists, but got error: %v", dst, err)
// For small files, don't bother risking being wrong for no performance benefit
if f.GetLength() > 2048 {
exists, err := fileExists(s, f, dst)
if err != nil {
glog.Infof("existence check for %s: %v", dst, err)
}
if exists {
glog.Infof("copy: skipping %s (exists)", dst)
return nil
}
}
if exists {
glog.Infof("Skipping copying %s as it already exists", dst)
return nil
src := f.GetSourcePath()
glog.Infof("scp %s --> %s (%d bytes)", src, dst, f.GetLength())
if f.GetLength() == 0 {
glog.Warningf("0 byte asset: %+v", f)
}
sess, err := s.c.NewSession()
if err != nil {
return errors.Wrap(err, "NewSession")
@ -171,14 +183,13 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
// StdinPipe is closed. But let's use errgroup to make it explicit.
var g errgroup.Group
var copied int64
glog.Infof("Transferring %d bytes to %s", f.GetLength(), dst)
g.Go(func() error {
defer w.Close()
header := fmt.Sprintf("C%s %d %s\n", f.GetPermissions(), f.GetLength(), f.GetTargetName())
fmt.Fprint(w, header)
if f.GetLength() == 0 {
glog.Warningf("%s is a 0 byte asset!", f.GetTargetName())
glog.Warningf("asked to copy a 0 byte asset: %+v", f)
fmt.Fprint(w, "\x00")
return nil
}
@ -190,7 +201,6 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
if copied != int64(f.GetLength()) {
return fmt.Errorf("%s: expected to copy %d bytes, but copied %d instead", f.GetTargetName(), f.GetLength(), copied)
}
glog.Infof("%s: copied %d bytes", f.GetTargetName(), copied)
fmt.Fprint(w, "\x00")
return nil
})
@ -208,72 +218,3 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
}
return g.Wait()
}
func (s *SSHRunner) sameFileExists(f assets.CopyableFile, dst string) (bool, error) {
// get file size and modtime of the source
srcSize := f.GetLength()
srcModTime, err := f.GetModTime()
if err != nil {
return false, err
}
if srcModTime.IsZero() {
return false, nil
}
// get file size and modtime of the destination
sess, err := s.c.NewSession()
if err != nil {
return false, err
}
cmd := "stat -c \"%s %y\" " + dst
out, err := sess.CombinedOutput(cmd)
if err != nil {
return false, err
}
outputs := strings.SplitN(strings.Trim(string(out), "\n"), " ", 2)
dstSize, err := strconv.Atoi(outputs[0])
if err != nil {
return false, err
}
dstModTime, err := time.Parse(layout, outputs[1])
if err != nil {
return false, err
}
glog.Infof("found %s: %d bytes, modified at %s", dst, dstSize, dstModTime)
// compare sizes and modtimes
if srcSize != dstSize {
return false, errors.New("source file and destination file are different sizes")
}
return srcModTime.Equal(dstModTime), nil
}
// teePrefix copies bytes from a reader to writer, logging each new line.
func teePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanBytes)
var line bytes.Buffer
for scanner.Scan() {
b := scanner.Bytes()
if _, err := w.Write(b); err != nil {
return err
}
if bytes.IndexAny(b, "\r\n") == 0 {
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
line.Reset()
}
continue
}
line.Write(b)
}
// Catch trailing output in case stream does not end with a newline
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
}
return nil
}

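Copy still speaks the scp "source" protocol by hand: a header of the form C<perms> <length> <name>, exactly GetLength() payload bytes, then a single NUL. The framing for a hypothetical 12-byte file named config with mode 0644 (w is stdin of the remote `scp -t` session, src its local reader):

    header := fmt.Sprintf("C%s %d %s\n", "0644", 12, "config") // "C0644 12 config\n"
    fmt.Fprint(w, header)  // announce mode, size and target name
    io.CopyN(w, src, 12)   // stream exactly the advertised byte count
    fmt.Fprint(w, "\x00")  // NUL marks the end of the transfer
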
@ -18,14 +18,13 @@ limitations under the License.
package exit
import (
"fmt"
"os"
"runtime"
"runtime/debug"
"github.com/golang/glog"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/problem"
"k8s.io/minikube/pkg/minikube/translate"
)
// Exit codes based on sysexits(3)
@ -40,9 +39,6 @@ const (
IO = 74 // IO represents an I/O error
Config = 78 // Config represents an unconfigured or misconfigured state
Permissions = 77 // Permissions represents a permissions error
// MaxLogEntries controls the number of log entries to show for each source
MaxLogEntries = 3
)
// UsageT outputs a templated usage error and exits with error code 64
@ -59,11 +55,12 @@ func WithCodeT(code int, format string, a ...out.V) {
// WithError outputs an error and exits.
func WithError(msg string, err error) {
glog.Infof("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
p := problem.FromError(err, runtime.GOOS)
if p != nil {
WithProblem(msg, err, p)
}
displayError(msg, err)
out.DisplayError(msg, err)
os.Exit(Software)
}
@ -79,29 +76,3 @@ func WithProblem(msg string, err error, p *problem.Problem) {
}
os.Exit(Config)
}
// WithLogEntries outputs an error along with any important log entries, and exits.
func WithLogEntries(msg string, err error, entries map[string][]string) {
displayError(msg, err)
for name, lines := range entries {
out.FailureT("Problems detected in {{.entry}}:", out.V{"entry": name})
if len(lines) > MaxLogEntries {
lines = lines[:MaxLogEntries]
}
for _, l := range lines {
out.T(out.LogEntry, l)
}
}
os.Exit(Software)
}
func displayError(msg string, err error) {
// use Warning because Error will display a duplicate message to stderr
glog.Warningf(fmt.Sprintf("%s: %v", msg, err))
out.ErrT(out.Empty, "")
out.FatalT("{{.msg}}: {{.err}}", out.V{"msg": translate.T(msg), "err": err})
out.ErrT(out.Empty, "")
out.ErrT(out.Sad, "minikube is exiting due to an error. If the above message is not useful, open an issue:")
out.ErrT(out.URL, "https://github.com/kubernetes/minikube/issues/new/choose")
}

@ -149,7 +149,7 @@ func TestAssetsFromDir(t *testing.T) {
got := make(map[string]string)
for _, actualFile := range actualFiles {
got[actualFile.GetAssetName()] = actualFile.GetTargetDir()
got[actualFile.GetSourcePath()] = actualFile.GetTargetDir()
}
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("files differ: (-want +got)\n%s", diff)

@ -81,7 +81,6 @@ func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string)
}
out.T(out.Check, "Download complete!")
os.Exit(0)
}
// CacheKubectlBinary caches the kubectl binary

@ -22,6 +22,7 @@ import (
"os/exec"
"path/filepath"
"strconv"
"sync"
"github.com/golang/glog"
"github.com/spf13/viper"
@ -46,7 +47,10 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
}
// configureMounts configures any requested filesystem mounts
func configureMounts() {
func configureMounts(wg *sync.WaitGroup) {
wg.Add(1)
defer wg.Done()
if !viper.GetBool(createMount) {
return
}

@ -39,7 +39,21 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
return errors.Wrap(err, "save node")
}
_, err := Start(*cc, n, nil, false)
r, p, m, h, err := Provision(cc, &n, false)
if err != nil {
return err
}
s := Starter{
Runner: r,
PreExists: p,
MachineAPI: m,
Host: h,
Cfg: cc,
Node: &n,
ExistingAddons: nil,
}
_, err = Start(s, false)
return err
}

@ -59,58 +59,45 @@ import (
const waitTimeout = "wait-timeout"
var (
kicGroup errgroup.Group
cacheGroup errgroup.Group
)
// Starter is a struct with all the necessary information to start a node
type Starter struct {
Runner command.Runner
PreExists bool
MachineAPI libmachine.API
Host *host.Host
Cfg *config.ClusterConfig
Node *config.Node
ExistingAddons map[string]bool
}
// Start spins up a guest and starts the kubernetes node.
func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
name := driver.MachineName(cc, n)
if apiServer {
out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
} else {
out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
}
var kicGroup errgroup.Group
if driver.IsKIC(cc.Driver) {
beginDownloadKicArtifacts(&kicGroup)
}
var cacheGroup errgroup.Group
if !driver.BareMetal(cc.Driver) {
beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
}
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
if err := config.SaveProfile(viper.GetString(config.ProfileName), &cc); err != nil {
exit.WithError("Failed to save config", err)
}
handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
waitDownloadKicArtifacts(&kicGroup)
mRunner, preExists, machineAPI, host := startMachine(&cc, &n)
defer machineAPI.Close()
func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
// wait for preloaded tarball to finish downloading before configuring runtimes
waitCacheRequiredImages(&cacheGroup)
sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
sv, err := util.ParseKubernetesVersion(starter.Node.KubernetesVersion)
if err != nil {
return nil, errors.Wrap(err, "Failed to parse kubernetes version")
}
// configure the runtime (docker, containerd, crio)
cr := configureRuntimes(mRunner, cc, sv)
showVersionInfo(n.KubernetesVersion, cr)
cr := configureRuntimes(starter.Runner, *starter.Cfg, sv)
showVersionInfo(starter.Node.KubernetesVersion, cr)
// ssh should be set up by now
// switch to using ssh runner since it is faster
if driver.IsKIC(cc.Driver) {
sshRunner, err := machine.SSHRunner(host)
if driver.IsKIC(starter.Cfg.Driver) {
sshRunner, err := machine.SSHRunner(starter.Host)
if err != nil {
glog.Infof("error getting ssh runner: %v", err)
} else {
glog.Infof("Using ssh runner for kic...")
mRunner = sshRunner
starter.Runner = sshRunner
}
}
@ -118,17 +105,18 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
var kcs *kubeconfig.Settings
if apiServer {
// Must be written before bootstrap, otherwise health checks may flake due to stale IP
kcs = setupKubeconfig(host, &cc, &n, cc.Name)
kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name)
if err != nil {
return nil, errors.Wrap(err, "Failed to setup kubeconfig")
}
// setup kubeadm (must come after setupKubeconfig)
bs = setupKubeAdm(machineAPI, cc, n)
bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node)
err = bs.StartCluster(*starter.Cfg)
err = bs.StartCluster(cc)
if err != nil {
exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner))
out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner))
return nil, err
}
// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
@ -136,65 +124,104 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
return nil, errors.Wrap(err, "Failed to update kubeconfig file.")
}
} else {
bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, *starter.Node)
if err != nil {
return nil, errors.Wrap(err, "Failed to get bootstrapper")
}
if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil {
return nil, errors.Wrap(err, "setting up certs")
}
}
configureMounts()
var wg sync.WaitGroup
go configureMounts(&wg)
if err := CacheAndLoadImagesInConfig(); err != nil {
out.FailureT("Unable to push cached images from config: {{.error}}", out.V{"error": err})
}
wg.Add(1)
go func() {
if err := CacheAndLoadImagesInConfig(); err != nil {
out.FailureT("Unable to push cached images: {{error}}", out.V{"error": err})
}
wg.Done()
}()
// enable addons, both old and new!
if existingAddons != nil {
addons.Start(viper.GetString(config.ProfileName), existingAddons, config.AddonList)
if starter.ExistingAddons != nil {
go addons.Start(&wg, starter.Cfg, starter.ExistingAddons, config.AddonList)
}
if apiServer {
// special ops for the none driver, like changing the minikube directory.
// multinode is not supported on the none driver
if cc.Driver == driver.None && len(cc.Nodes) == 1 {
if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 {
prepareNone()
}
// Skip pre-existing, because we already waited for health
if kverify.ShouldWait(cc.VerifyComponents) && !preExists {
if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
if kverify.ShouldWait(starter.Cfg.VerifyComponents) && !starter.PreExists {
if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
return nil, errors.Wrap(err, "Wait failed")
}
}
} else {
if err := bs.UpdateNode(cc, n, cr); err != nil {
if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
return nil, errors.Wrap(err, "Updating node")
}
cp, err := config.PrimaryControlPlane(&cc)
cp, err := config.PrimaryControlPlane(starter.Cfg)
if err != nil {
return nil, errors.Wrap(err, "Getting primary control plane")
}
cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cp)
if err != nil {
return nil, errors.Wrap(err, "Getting bootstrapper")
}
joinCmd, err := cpBs.GenerateToken(cc)
joinCmd, err := cpBs.GenerateToken(*starter.Cfg)
if err != nil {
return nil, errors.Wrap(err, "generating join token")
}
if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil {
return nil, errors.Wrap(err, "joining cluster")
}
}
return kcs, nil
wg.Wait()
// Write enabled addons to the config before completion
return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
}
// Provision provisions the machine/container for the node
func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool) (command.Runner, bool, libmachine.API, *host.Host, error) {
name := driver.MachineName(*cc, *n)
if apiServer {
out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
} else {
out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
}
if driver.IsKIC(cc.Driver) {
beginDownloadKicArtifacts(&kicGroup)
}
if !driver.BareMetal(cc.Driver) {
beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
}
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
if err := config.SaveProfile(viper.GetString(config.ProfileName), cc); err != nil {
return nil, false, nil, nil, errors.Wrap(err, "Failed to save config")
}
handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion)
waitDownloadKicArtifacts(&kicGroup)
return startMachine(cc, n)
}
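A minimal sketch of how a caller might chain Provision into Start, assuming a Starter struct that simply carries the fields referenced in the Start hunks above (Runner, PreExists, MachineAPI, Host, Cfg, Node, ExistingAddons):
```go
// Hypothetical wiring, inferred from the Provision/Start signatures in this diff.
r, preExists, mAPI, h, err := node.Provision(&cc, &n, true)
if err != nil {
	exit.WithError("Failed to provision node", err)
}
kcs, err := node.Start(node.Starter{
	Runner:         r,         // command runner for the new machine
	PreExists:      preExists, // whether the host already existed
	MachineAPI:     mAPI,
	Host:           h,
	Cfg:            &cc,
	Node:           &n,
	ExistingAddons: existingAddons,
}, true)
if err != nil {
	exit.WithError("Failed to start node", err)
}
_ = kcs // kubeconfig settings for the new control plane
```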
// ConfigureRuntimes does what needs to happen to get a runtime going.
@@ -303,18 +330,24 @@ func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string,
}
// StartMachine starts a VM
func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client")
}
host, preExists, err = startHost(m, *cfg, *node)
if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to start host")
}
host, preExists = startHost(m, *cfg, *node)
runner, err = machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner")
}
ip := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network")
}
// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
@@ -326,17 +359,17 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.
node.IP = ip
err = config.SaveNode(cfg, node)
if err != nil {
exit.WithError("saving node", err)
return runner, preExists, m, host, errors.Wrap(err, "saving node")
}
return runner, preExists, m, host
return runner, preExists, m, host, err
}
// startHost starts a new minikube host using a VM or None
func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool) {
func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool, error) {
host, exists, err := machine.StartHost(api, cc, n)
if err == nil {
return host, exists
return host, exists, nil
}
out.ErrT(out.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err})
@@ -353,20 +386,20 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
host, exists, err = machine.StartHost(api, cc, n)
if err == nil {
return host, exists
return host, exists, nil
}
// Don't use host.Driver to avoid nil pointer deref
drv := cc.Driver
exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err)
return host, exists
out.ErrT(out.Sad, `Failed to start {{.driver}} {{.driver_type}}. "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "start"), "error": err})
return host, exists, err
}
// validateNetwork tries to catch network problems as soon as possible
func validateNetwork(h *host.Host, r command.Runner, imageRepository string) string {
func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (string, error) {
ip, err := h.Driver.GetIP()
if err != nil {
exit.WithError("Unable to get VM IP address", err)
return ip, err
}
optSeen := false
@@ -388,17 +421,19 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) str
}
if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
trySSH(h, ip)
if err := trySSH(h, ip); err != nil {
return ip, err
}
}
// Non-blocking
go tryRegistry(r, h.Driver.DriverName(), imageRepository)
return ip
return ip, nil
}
func trySSH(h *host.Host, ip string) {
func trySSH(h *host.Host, ip string) error {
if viper.GetBool("force") {
return
return nil
}
sshAddr := net.JoinHostPort(ip, "22")
@@ -414,8 +449,9 @@ func trySSH(h *host.Host, ip string) {
return nil
}
if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
err := retry.Expo(dial, time.Second, 13*time.Second)
if err != nil {
out.ErrT(out.FailureType, `minikube is unable to connect to the VM: {{.error}}
This is likely due to one of two reasons:
@@ -431,6 +467,8 @@ func trySSH(h *host.Host, ip string) {
- Use --force to override this connectivity check
`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
}
return err
}
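For context, retry.Expo as called above takes a func() error plus an initial interval and an overall deadline, retrying with exponential backoff until the callback succeeds or time runs out. A minimal sketch, assuming the same dial shape as trySSH:
```go
dial := func() error {
	// Try to open a TCP connection to the VM's SSH port.
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, "22"), 3*time.Second)
	if err != nil {
		return err
	}
	return conn.Close()
}
// Start at ~1s between attempts, back off exponentially, give up after 13s total.
if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
	// the VM never became reachable over SSH
}
```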
// tryRegistry tries to connect to the image repository

View File

@@ -26,6 +26,7 @@ import (
"github.com/golang/glog"
isatty "github.com/mattn/go-isatty"
"k8s.io/minikube/pkg/minikube/translate"
)
// By design, this package uses global references to language and output objects, in preference
@@ -51,6 +52,9 @@ var (
OverrideEnv = "MINIKUBE_IN_STYLE"
)
// MaxLogEntries controls the number of log entries to show for each source
const MaxLogEntries = 3
// fdWriter is the subset of file.File that implements io.Writer and Fd()
type fdWriter interface {
io.Writer
@@ -175,3 +179,29 @@ func wantsColor(fd uintptr) bool {
glog.Infof("isatty.IsTerminal(%d) = %v\n", fd, isT)
return isT
}
// LogEntries outputs an error along with any important log entries.
func LogEntries(msg string, err error, entries map[string][]string) {
DisplayError(msg, err)
for name, lines := range entries {
T(FailureType, "Problems detected in {{.entry}}:", V{"entry": name})
if len(lines) > MaxLogEntries {
lines = lines[:MaxLogEntries]
}
for _, l := range lines {
T(LogEntry, l)
}
}
}
// DisplayError prints the error and displays the standard minikube error messaging
func DisplayError(msg string, err error) {
// use Warning because Error will display a duplicate message to stderr
glog.Warningf(fmt.Sprintf("%s: %v", msg, err))
ErrT(Empty, "")
FatalT("{{.msg}}: {{.err}}", V{"msg": translate.T(msg), "err": err})
ErrT(Empty, "")
ErrT(Sad, "minikube is exiting due to an error. If the above message is not useful, open an issue:")
ErrT(URL, "https://github.com/kubernetes/minikube/issues/new/choose")
}
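A usage sketch for LogEntries (an assumption, not code from this commit): it takes the same map shape that logs.FindProblems returns in the Start hunk above, source name to recent log lines, and prints at most MaxLogEntries lines per source:
```go
entries := map[string][]string{
	"kubelet": {
		"failed to run Kubelet: running with swap on is not supported",
	},
}
// DisplayError prints the error first, then each problem source is listed
// with up to MaxLogEntries of its log lines.
out.LogEntries("Error starting cluster", err, entries)
```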

View File

@@ -83,8 +83,8 @@ All translations are stored in the top-level `translations` directory.
```
~/minikube$ LC_ALL=fr out/minikube start
😄 minikube v1.9.2 sur Darwin 10.14.5
✨ Choix automatique du driver hyperkit. Autres choix: <no value>
👍 Starting control plane node minikube in cluster minikube
✨ Choix automatique du driver hyperkit. Autres choix: docker
👍 Démarrage du noeud de plan de contrôle minikube dans le cluster minikube
🔥 Création de VM hyperkit (CPUs=2, Mémoire=4000MB, Disque=20000MB)...
🐳 Préparation de Kubernetes v1.18.0 sur Docker 19.03.8...
🌟 Installation des addons: default-storageclass, storage-provisioner

View File

@@ -0,0 +1,33 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
selector:
matchLabels:
app: hello
template:
metadata:
labels:
app: hello
spec:
affinity:
# ⬇⬇⬇ This ensures pods will land on separate hosts
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: [{ key: app, operator: In, values: [hello-from] }]
topologyKey: "kubernetes.io/hostname"
containers:
- name: hello-from
image: pbitty/hello-from:latest
ports:
- name: http
containerPort: 80
terminationGracePeriodSeconds: 1

View File

@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Service
metadata:
name: hello
spec:
type: NodePort
selector:
app: hello
ports:
- protocol: TCP
nodePort: 31000
port: 80
targetPort: http

View File

@@ -0,0 +1,602 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

View File

@@ -0,0 +1,134 @@
---
title: "Using Multi-Node Clusters (Experimental)"
linkTitle: "Using multi-node clusters"
weight: 1
date: 2019-11-24
---
## Overview
- This tutorial will show you how to start a multi-node cluster on minikube and deploy a service to it.
## Prerequisites
- minikube 1.9.0 or higher
- kubectl
## Tutorial
- Start a cluster with 2 nodes using the driver of your choice (the extra parameters are needed to make our chosen CNI, flannel, work while multi-node support is still experimental):
```
minikube start --nodes 2 -p multinode-demo --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.244.0.0/16
😄 [multinode-demo] minikube v1.9.2 on Darwin 10.14.6
✨ Automatically selected the hyperkit driver
👍 Starting control plane node m01 in cluster multinode-demo
🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ...
🐳 Preparing Kubernetes v1.18.0 on Docker 19.03.8 ...
🌟 Enabling addons: default-storageclass, storage-provisioner
👍 Starting node m02 in cluster multinode-demo
🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ...
🌐 Found network options:
▪ NO_PROXY=192.168.64.213
🐳 Preparing Kubernetes v1.18.0 on Docker 19.03.8 ...
🏄 Done! kubectl is now configured to use "multinode-demo"
```
- Get the list of your nodes:
```
kubectl get nodes
NAME STATUS ROLES AGE VERSION
multinode-demo Ready master 9m58s v1.18.0
multinode-demo-m02 Ready <none> 9m5s v1.18.0
```
- Install a CNI (e.g. flannel):
NOTE: This currently needs to be done manually after the apiserver is running; the multi-node feature is still experimental as of 1.9.2.
```
kubectl apply -f kube-flannel.yaml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
```
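- (Optional sanity check, not part of the original steps) Confirm the flannel pods are running on every node before continuing:
```
kubectl get pods -n kube-system -l app=flannel -o wide
```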
- Deploy our hello world deployment:
```
kubectl apply -f hello-deployment.yaml
deployment.apps/hello created
kubectl rollout status deployment/hello
deployment "hello" successfully rolled out
```
- Deploy our hello world service, which just spits back the IP address the request was served from:
{{% readfile file="/docs/tutorials/includes/hello-svc.yaml" %}}
```
kubectl apply -f hello-svc.yaml
service/hello created
```
- Check out the IP addresses of our pods, noting them for future reference:
```
kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-c7b8df44f-qbhxh 1/1 Running 0 31s 10.244.0.3 multinode-demo <none> <none>
hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinode-demo <none> <none>
```
- Look at our service to see which URL to hit:
```
minikube service list
|-------------|------------|--------------|-----------------------------|
| NAMESPACE | NAME | TARGET PORT | URL |
|-------------|------------|--------------|-----------------------------|
| default | hello | 80 | http://192.168.64.226:31000 |
| default | kubernetes | No node port | |
| kube-system | kube-dns | No node port | |
|-------------|------------|--------------|-----------------------------|
```
- Let's hit the URL a few times and see what comes back:
```
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
```
- Multiple nodes!
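- (Optional) When you're done experimenting, tear the whole cluster down; `minikube delete -p` should remove the profile, both nodes included:
```
minikube delete -p multinode-demo
```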
- Referenced YAML files:
{{% tabs %}}
{{% tab kube-flannel.yaml %}}
```
{{% readfile file="/docs/tutorials/includes/kube-flannel.yaml" %}}
```
{{% /tab %}}
{{% tab hello-deployment.yaml %}}
```
{{% readfile file="/docs/tutorials/includes/hello-deployment.yaml" %}}
```
{{% /tab %}}
{{% tab hello-svc.yaml %}}
```
{{% readfile file="/docs/tutorials/includes/hello-svc.yaml" %}}
```
{{% /tab %}}
{{% /tabs %}}

View File

@@ -203,7 +203,8 @@ func clusterLogs(t *testing.T, profile string) {
t.Logf("-----------------------post-mortem--------------------------------")
t.Logf("<<< %s FAILED: start of post-mortem logs <<<", t.Name())
t.Logf("-------------------post-mortem minikube logs----------------------")
t.Logf("======> post-mortem[%s]: minikube logs <======", t.Name())
rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems"))
if err != nil {
t.Logf("failed logs error: %v", err)
@@ -211,27 +212,43 @@ func clusterLogs(t *testing.T, profile string) {
}
t.Logf("%s logs: %s", t.Name(), rr.Output())
t.Logf("------------------post-mortem api server status-------------------")
t.Logf("======> post-mortem[%s]: disk usage <======", t.Name())
rr, err = Run(t, exec.Command(Target(), "-p", profile, "ssh", "df -h /var/lib/docker/overlay2 /var /; du -hs /var/lib/docker/overlay2"))
if err != nil {
t.Logf("failed df error: %v", err)
}
t.Logf("%s df: %s", t.Name(), rr.Stdout)
st = Status(context.Background(), t, Target(), profile, "APIServer")
if st != state.Running.String() {
t.Logf("%q apiserver is not running, skipping kubectl commands (state=%q)", profile, st)
return
}
t.Logf("--------------------post-mortem get pods--------------------------")
t.Logf("======> post-mortem[%s]: get pods <======", t.Name())
rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-A", "--show-labels"))
if rerr != nil {
t.Logf("%s: %v", rr.Command(), rerr)
return
}
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())
t.Logf("-------------------post-mortem describe node----------------------")
t.Logf("======> post-mortem[%s]: describe node <======", t.Name())
rr, err = Run(t, exec.Command("kubectl", "--context", profile, "describe", "node"))
if err != nil {
t.Logf("%s: %v", rr.Command(), err)
} else {
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())
}
t.Logf("------------------------------------------------------------------")
t.Logf("======> post-mortem[%s]: describe pods <======", t.Name())
rr, err = Run(t, exec.Command("kubectl", "--context", profile, "describe", "po", "-A"))
if err != nil {
t.Logf("%s: %v", rr.Command(), err)
} else {
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout)
}
t.Logf("<<< %s FAILED: end of post-mortem logs <<<", t.Name())
t.Logf("---------------------/post-mortem---------------------------------")
}

View File

@@ -405,8 +405,8 @@
"Suggestion: {{.advice}}": "",
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Der Treiber \"{{.driver_name}}\" benötigt Root-Rechte. Führen Sie minikube aus mit 'sudo minikube --vm-driver = {{. Driver_name}}.",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The \"{{.name}}\" cluster has been deleted.": "Der Cluster \"{{.name}}\" wurde gelöscht.",
"The \"{{.name}}\" cluster has been deleted.__1": "Der Cluster \"{{.name}}\" wurde gelöscht.",
@@ -579,13 +579,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Sie scheinen einen Proxy zu verwenden, aber Ihre NO_PROXY-Umgebung enthält keine minikube-IP ({{.ip_address}}). Weitere Informationen finden Sie unter {{.documentation_url}}",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
"You can delete them using the following command(s):": "",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Möglicherweise müssen Sie die VM \"{{.name}}\" manuell von Ihrem Hypervisor entfernen",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",

View File

@@ -406,8 +406,8 @@
"Suggestion: {{.advice}}": "",
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "El controlador \"{{.driver_name}}\" requiere privilegios de raíz. Ejecuta minikube mediante sudo minikube --vm-driver={{.driver_name}}",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The \"{{.name}}\" cluster has been deleted.": "Se ha eliminado el clúster \"{{.name}}\".",
"The \"{{.name}}\" cluster has been deleted.__1": "Se ha eliminado el clúster \"{{.name}}\".",
@@ -580,13 +580,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Parece que estás usando un proxy, pero tu entorno NO_PROXY no incluye la dirección IP de minikube ({{.ip_address}}). Consulta {{.documentation_url}} para obtener más información",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
"You can delete them using the following command(s):": "",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Puede que tengas que retirar manualmente la VM \"{{.name}}\" de tu hipervisor",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",

View File

@@ -37,8 +37,8 @@
"Amount of time to wait for service in seconds": "",
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
"Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "",
"Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}",
"Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternatives}}",
"Automatically selected the {{.driver}} driver": "Choix automatique du pilote {{.driver}}",
"Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du pilote {{.driver}}. Autres choix: {{.alternatives}}",
"Available Commands": "",
"Basic Commands:": "",
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
@@ -386,8 +386,8 @@
"Specify the mount filesystem type (supported types: 9p)": "",
"Start failed after cluster deletion": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting control plane node {{.name}} in cluster {{.cluster}}": "",
"Starting node {{.name}} in cluster {{.cluster}}": "",
"Starting control plane node {{.name}} in cluster {{.cluster}}": "Démarrage du noeud de plan de contrôle {{.name}} dans le cluster {{.cluster}}",
"Starting node {{.name}} in cluster {{.cluster}}": "Démarrage du noeud {{.name}} dans le cluster {{.cluster}}",
"Starting tunnel for service {{.service}}.": "",
"Starts a local kubernetes cluster": "Démarre un cluster Kubernetes local.",
"Starts a node.": "",
@@ -404,8 +404,8 @@
"Suggestion: {{.advice}}": "",
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Le pilote \"{{.driver_name}}\" nécessite de disposer de droits racine. Veuillez exécuter minikube à l'aide de \"sudo minikube --vm-driver={{.driver_name}}\".",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The 'none' driver is designed for experts who need to integrate with an existing VM": "",
"The 'none' driver provides limited isolation and may reduce system security and reliability.": "L'isolation fournie par le pilote \"none\" (aucun) est limitée, ce qui peut diminuer la sécurité et la fiabilité du système.",
@@ -533,7 +533,7 @@
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
"Unset variables instead of setting them": "",
"Update server returned an empty list": "",
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "Mise à jour du {{.machine_type}} {{.driver_name}} en marche \"{{.cluster}}\" ...",
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
"Upgrading from Kubernetes {{.old}} to {{.new}}": "Mise à niveau de Kubernetes de la version {{.old}} à la version {{.new}}…",
"Usage": "Usage",
@@ -554,8 +554,8 @@
"Userspace file server:": "",
"Using image repository {{.name}}": "Utilisation du dépôt d'images {{.name}}…",
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"Using the {{.driver}} driver based on existing profile": "Utilisation du pilote {{.driver}} basé sur le profil existant",
"Using the {{.driver}} driver based on user configuration": "Utilisation du pilote {{.driver}} basé sur la configuration de l'utilisateur",
"VM driver is one of: %v": "Le pilote de la VM appartient à : %v",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
@@ -579,13 +579,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Il semble que vous utilisiez un proxy, mais votre environment NO_PROXY n'inclut pas l'adresse IP ({{.ip_address}}) de minikube. Consultez la documentation à l'adresse {{.documentation_url}} pour en savoir plus.",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
"You can delete them using the following command(s):": "",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Vous devrez peut-être supprimer la VM \"{{.name}}\" manuellement de votre hyperviseur.",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
@@ -684,4 +684,4 @@
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} sur {{.platform}}",
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
"{{.url}} is not accessible: {{.error}}": ""
}
}

View File

@@ -416,8 +416,8 @@
"Suggestion: {{.advice}}": "",
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "「{{.driver_name}}」ドライバにはルート権限が必要です。「sudo minikube --vm-driver={{.driver_name}}」を使用して minikube を実行してください",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The \"{{.name}}\" cluster has been deleted.": "「{{.name}}」クラスタが削除されました",
"The \"{{.name}}\" cluster has been deleted.__1": "「{{.name}}」クラスタが削除されました",
@@ -590,13 +590,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "プロキシを使用しようとしていますが、現在の NO_PROXY 環境に minikube IP{{.ip_address}})は含まれていません。詳細については、{{.documentation_url}} をご覧ください",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "「 minikube kubectl -- get pods 」で、一致するバージョンを表示することができます",
"You can delete them using the following command(s):": "以下のコマンドで削除することができます",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "ハイパーバイザから「{{.name}}」VM を手動で削除することが必要な可能性があります",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "Hyper-V マネージャを停止して、「 minikube delete 」を再実行する必要があるかもしれません ",
"You must specify a service name": "サービスの名前を明示する必要があります",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "ホストマシーンは KVM 仮想化をサポートしていません。 qemu-kvm がインストールされていることを確認してください。「 virt-host-validate 」を実行して、デバッグしてください",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "ホストマシーンは仮想化をサポートしていません。もし VM 内で minikube を動かすのであれば、「 --driver=docker 」を試してください。そうでなければ、 BIOS で仮想化を有効にしてください",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "ホストマシーンが minikube の VM にパケットをルーティングすることができていません。もし VPN を有効しているのであれば、VPN を無効にする、あるいは VM の IP アドレスに再ルーティングしないように設定してください。もし VPN を使用していないのであれば、 VM 環境のルーティング周りのオプションを確認してください",

View File

@@ -413,6 +413,7 @@
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "타겟 폴더 {{.path}} 는 절대 경로여야 합니다",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "\"{{.driver_name}}\" 드라이버는 root 권한으로 실행되어야 합니다. minikube 를 다음과 같이 실행하세요 'sudo minikube --driver={{.driver_name}}'",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "\"{{.driver_name}}\" 드라이버는 root 권한으로 실행되면 안 됩니다",
"The 'none' driver is designed for experts who need to integrate with an existing VM": "",
"The '{{.addonName}}' addon is enabled": "",
@@ -570,13 +571,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "맞는 버전의 kubectl 을 사용하기 위해서는 다음과 같이 사용 가능합니다. minikube kubectl -- get pods'",
"You can delete them using the following command(s):": "다음 커맨드(들)을 사용하여 제거할 수 있습니다",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "service 이름을 명시해야 합니다",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "호스트가 KVM 가상화를 지원하지 않습니다. qemu-kvm 이 설치되었는지 확인 후, 문제 디버그를 위해 'virt-host-validate' 를 실행하세요",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "호스트가 가상화를 지원하지 않습니다. 가상 머신 안에서 minikube 를 실행 중인 경우, '--driver=none' 로 시도하세요. 그렇지 않다면, BIOS 에서 가상화를 활성화하세요",

View File

@@ -190,8 +190,8 @@
"Failed to setup kubeconfig": "Konfiguracja kubeconfig nie powiodła się",
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "Aktualizacja klastra nie powiodła się",
"Failed to validate '{{.driver}}' driver": "",
"Failed to update config": "Aktualizacja konfiguracji nie powiodła się",
"Failed to validate '{{.driver}}' driver": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",
@@ -410,8 +410,8 @@
"Suggestion: {{.fix}}": "",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.cluster_name}}\" cluster has been deleted.": "Klaster \"{{.cluster_name}}\" został usunięty",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}'.": "Sterownik \"{{.driver_name}}\" wymaga uprawnień root'a. Użyj 'sudo minikube --vm-driver={{.driver_name}}'",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The \"{{.name}}\" cluster has been deleted.": "Klaster \"{{.name}}\" został usunięty",
"The 'none' driver is designed for experts who need to integrate with an existing VM": "",
@@ -578,13 +578,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
"You can delete them using the following command(s):": "",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "Musisz podać nazwę serwisu",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "Twoje środowisko nie wspiera virtualizacji KVM. Upewnij się że qemu-kvm jest zainstalowane i uruchom 'virt-host-validate' aby rozwiązać problem.",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",

View File

@@ -471,8 +471,8 @@
"Suggestion: {{.advice}}": "建议:{{.advice}}",
"Suggestion: {{.fix}}": "建议:{{.fix}}",
"Target directory {{.path}} must be an absolute path": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "“{{.driver_name}}”驱动程序需要根权限。请使用“sudo minikube --vm-driver={{.driver_name}}”运行 minikube",
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "",
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
"The \"{{.name}}\" cluster has been deleted.": "“{{.name}}”集群已删除。",
"The \"{{.name}}\" cluster has been deleted.__1": "“{{.name}}”集群已删除。",
@@ -658,13 +658,13 @@
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "您似乎正在使用代理,但您的 NO_PROXY 环境不包含 minikube IP ({{.ip_address}})。如需了解详情,请参阅 {{.documentation_url}}",
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
"You can delete them using the following command(s):": "",
"You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "",
"You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "",
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "您可能需要从管理程序中手动移除“{{.name}}”虚拟机",
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
"You must specify a service name": "",
"You not the change the CPUs for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the Disk size for an exiting minikube cluster. Pease first delete the cluster.": "",
"You not the change the memory size for an exiting minikube cluster. Pease first delete the cluster.": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",