Merge branch 'master' of https://github.com/kubernetes/minikube into generate-docs
commit cd81888a22
@@ -63,10 +63,8 @@ var dashboardCmd = &cobra.Command{
 		}
 	}
 
-	kubectl, err := exec.LookPath("kubectl")
-	if err != nil {
-		exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
-	}
+	kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion
+	var err error
 
 	// Check dashboard status before enabling it
 	dashboardAddon := assets.Addons["dashboard"]
@@ -90,7 +88,7 @@ var dashboardCmd = &cobra.Command{
 	}
 
 	out.ErrT(out.Launch, "Launching proxy ...")
-	p, hostPort, err := kubectlProxy(kubectl, cname)
+	p, hostPort, err := kubectlProxy(kubectlVersion, cname)
 	if err != nil {
 		exit.WithError("kubectl proxy", err)
 	}
@@ -124,10 +122,17 @@ var dashboardCmd = &cobra.Command{
 }
 
 // kubectlProxy runs "kubectl proxy", returning host:port
-func kubectlProxy(path string, contextName string) (*exec.Cmd, string, error) {
+func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) {
 	// port=0 picks a random system port
 
-	cmd := exec.Command(path, "--context", contextName, "proxy", "--port=0")
+	kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"}
+
+	var cmd *exec.Cmd
+	if kubectl, err := exec.LookPath("kubectl"); err == nil {
+		cmd = exec.Command(kubectl, kubectlArgs...)
+	} else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil {
+		return nil, "", err
+	}
 
 	stdoutPipe, err := cmd.StdoutPipe()
 	if err != nil {
@@ -43,17 +43,12 @@ minikube kubectl -- get pods --namespace kube-system`,
 		co := mustload.Healthy(ClusterFlagValue())
 
 		version := co.Config.KubernetesConfig.KubernetesVersion
-		if version == "" {
-			version = constants.DefaultKubernetesVersion
-		}
 
-		path, err := node.CacheKubectlBinary(version)
+		c, err := KubectlCommand(version, args...)
 		if err != nil {
 			out.ErrLn("Error caching kubectl: %v", err)
 		}
 
-		glog.Infof("Running %s %v", path, args)
-		c := exec.Command(path, args...)
 		c.Stdin = os.Stdin
 		c.Stdout = os.Stdout
 		c.Stderr = os.Stderr
@@ -70,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`,
 		}
 	},
 }
+
+// KubectlCommand will return kubectl command with a version matching the cluster
+func KubectlCommand(version string, args ...string) (*exec.Cmd, error) {
+	if version == "" {
+		version = constants.DefaultKubernetesVersion
+	}
+
+	path, err := node.CacheKubectlBinary(version)
+	if err != nil {
+		return nil, err
+	}
+
+	return exec.Command(path, args...), nil
+}
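For reference, the lookup order these two files now share (prefer a kubectl binary found on PATH, and only fall back to a version-matched binary from minikube's cache) can be exercised in isolation. This is a hedged, minimal sketch rather than minikube's actual code; cachedKubectlPath is a hypothetical stand-in for node.CacheKubectlBinary:

    package main

    import (
        "fmt"
        "os/exec"
    )

    // cachedKubectlPath is a hypothetical stand-in for node.CacheKubectlBinary:
    // the real helper caches a kubectl matching the cluster version and
    // returns its path.
    func cachedKubectlPath(version string) (string, error) {
        return "/tmp/kubectl-" + version, nil
    }

    // kubectlCmd mirrors the fallback logic in the diff above: use kubectl
    // from PATH when present, otherwise the cached, version-matched binary.
    func kubectlCmd(version string, args ...string) (*exec.Cmd, error) {
        if path, err := exec.LookPath("kubectl"); err == nil {
            return exec.Command(path, args...), nil
        }
        path, err := cachedKubectlPath(version)
        if err != nil {
            return nil, err
        }
        return exec.Command(path, args...), nil
    }

    func main() {
        c, err := kubectlCmd("v1.18.0", "get", "pods")
        if err != nil {
            fmt.Println("no kubectl available:", err)
            return
        }
        fmt.Println("would run:", c.Args)
    }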
@@ -18,10 +18,8 @@ package cmd
 
 import (
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
-	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/mustload"
 	"k8s.io/minikube/pkg/minikube/node"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -56,7 +54,7 @@ var nodeAddCmd = &cobra.Command{
 		}
 
 		if err := node.Add(cc, n); err != nil {
-			exit.WithError("Error adding node to cluster", err)
+			maybeDeleteAndRetry(*cc, n, nil, err)
 		}
 
 		out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
@@ -64,13 +62,10 @@ var nodeAddCmd = &cobra.Command{
 }
 
 func init() {
+	// TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import
 	nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
 	nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
-	//We should figure out which of these flags to actually import
-	startCmd.Flags().Visit(
-		func(f *pflag.Flag) {
-			nodeAddCmd.Flags().AddFlag(f)
-		},
-	)
+	nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
+
 	nodeCmd.AddCommand(nodeAddCmd)
 }
@@ -49,12 +49,15 @@ var nodeStartCmd = &cobra.Command{
 			exit.WithError("retrieving node", err)
 		}
 
 		// Start it up baby
-		node.Start(*cc, *n, nil, false)
+		_, err = node.Start(*cc, *n, nil, false)
+		if err != nil {
+			maybeDeleteAndRetry(*cc, *n, nil, err)
+		}
 	},
 }
 
 func init() {
 	nodeStartCmd.Flags().String("name", "", "The name of the node to start")
+	nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 	nodeCmd.AddCommand(nodeStartCmd)
 }
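node.Start now surfaces its error instead of exiting, which is what lets the command layer decide whether to delete and retry (see the maybeDeleteAndRetry hunk in start.go below). A self-contained sketch of that retry-once shape, where startNode and deleteCluster are hypothetical stand-ins for the real calls:

    package main

    import (
        "errors"
        "fmt"
    )

    var attempts int

    // startNode is a hypothetical stand-in for node.Start; it fails once.
    func startNode() error {
        attempts++
        if attempts == 1 {
            return errors.New("provision failed")
        }
        return nil
    }

    // deleteCluster is a hypothetical stand-in for the delete step.
    func deleteCluster() { fmt.Println("deleting cluster before retry") }

    // startWithRetry mirrors the shape of maybeDeleteAndRetry in the diff:
    // only delete and retry when the user opted in via delete-on-failure.
    func startWithRetry(deleteOnFailure bool) error {
        err := startNode()
        if err == nil || !deleteOnFailure {
            return err
        }
        deleteCluster()
        return startNode()
    }

    func main() {
        fmt.Println(startWithRetry(true)) // <nil> after one delete and retry
    }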
@@ -124,6 +124,7 @@ const (
 	natNicType = "nat-nic-type"
 	nodes = "nodes"
 	preload = "preload"
+	deleteOnFailure = "delete-on-failure"
 )
 
 var (
@@ -177,6 +178,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
 	startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
 	startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
+	startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 }
 
 // initKubernetesFlags inits the commandline flags for kubernetes related options
@@ -353,7 +355,10 @@ func runStart(cmd *cobra.Command, args []string) {
 		}
 	}
 
-	kubeconfig := node.Start(cc, n, existingAddons, true)
+	kubeconfig, err := node.Start(cc, n, existingAddons, true)
+	if err != nil {
+		kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
+	}
 
 	numNodes := viper.GetInt(nodes)
 	if numNodes == 1 && existing != nil {
@@ -433,22 +438,12 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
 		return nil
 	}
 
-	j, err := exec.Command(path, "version", "--client", "--output=json").Output()
+	gitVersion, err := kubectlVersion(path)
 	if err != nil {
-		return errors.Wrap(err, "exec")
+		return err
 	}
 
-	cv := struct {
-		ClientVersion struct {
-			GitVersion string `json:"gitVersion"`
-		} `json:"clientVersion"`
-	}{}
-	err = json.Unmarshal(j, &cv)
-	if err != nil {
-		return errors.Wrap(err, "unmarshal")
-	}
-
-	client, err := semver.Make(strings.TrimPrefix(cv.ClientVersion.GitVersion, version.VersionPrefix))
+	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
 	if err != nil {
 		return errors.Wrap(err, "client semver")
 	}
@@ -467,6 +462,63 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
 	return nil
 }
 
+func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
+	if viper.GetBool(deleteOnFailure) {
+		out.T(out.Warning, "Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
+		// Start failed, delete the cluster and try again
+		profile, err := config.LoadProfile(cc.Name)
+		if err != nil {
+			out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name})
+		}
+
+		err = deleteProfile(profile)
+		if err != nil {
+			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name})
+		}
+
+		var kubeconfig *kubeconfig.Settings
+		for _, v := range cc.Nodes {
+			k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
+			if v.ControlPlane {
+				kubeconfig = k
+			}
+			if err != nil {
+				// Ok we failed again, let's bail
+				exit.WithError("Start failed after cluster deletion", err)
+			}
+		}
+		return kubeconfig
+	}
+	// Don't delete the cluster unless they ask
+	exit.WithError("startup failed", originalErr)
+	return nil
+}
+
+func kubectlVersion(path string) (string, error) {
+	j, err := exec.Command(path, "version", "--client", "--output=json").Output()
+	if err != nil {
+		// really old kubernetes clients did not have the --output parameter
+		b, err := exec.Command(path, "version", "--client", "--short").Output()
+		if err != nil {
+			return "", errors.Wrap(err, "exec")
+		}
+		s := strings.TrimSpace(string(b))
+		return strings.Replace(s, "Client Version: ", "", 1), nil
+	}
+
+	cv := struct {
+		ClientVersion struct {
+			GitVersion string `json:"gitVersion"`
+		} `json:"clientVersion"`
+	}{}
+	err = json.Unmarshal(j, &cv)
+	if err != nil {
+		return "", errors.Wrap(err, "unmarshal")
+	}
+
+	return cv.ClientVersion.GitVersion, nil
+}
+
 func selectDriver(existing *config.ClusterConfig) registry.DriverState {
 	// Technically unrelated, but important to perform before detection
 	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
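The new kubectlVersion helper tries `kubectl version --client --output=json` first and falls back to `--short` for very old clients. Both parsers can be checked against canned input; the sample payloads below are illustrative, not captured from a real kubectl run:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // parseClientGitVersion extracts clientVersion.gitVersion from the JSON
    // form of `kubectl version --client --output=json`, as in the diff.
    func parseClientGitVersion(j []byte) (string, error) {
        cv := struct {
            ClientVersion struct {
                GitVersion string `json:"gitVersion"`
            } `json:"clientVersion"`
        }{}
        if err := json.Unmarshal(j, &cv); err != nil {
            return "", err
        }
        return cv.ClientVersion.GitVersion, nil
    }

    // parseShort handles the fallback `--short` format,
    // e.g. "Client Version: v1.11.0".
    func parseShort(b []byte) string {
        s := strings.TrimSpace(string(b))
        return strings.Replace(s, "Client Version: ", "", 1)
    }

    func main() {
        sample := []byte(`{"clientVersion":{"gitVersion":"v1.18.0"}}`)
        v, err := parseClientGitVersion(sample)
        fmt.Println(v, err)                                        // v1.18.0 <nil>
        fmt.Println(parseShort([]byte("Client Version: v1.11.0"))) // v1.11.0
    }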
@@ -88,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
 		}
 	}
 
-	if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil {
+	if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil {
 		exit.WithError("Unable to stop VM", err)
 	}
 
@@ -17,20 +17,56 @@ limitations under the License.
 package cmd
 
 import (
+	"encoding/json"
+
 	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/version"
 )
 
+var (
+	versionOutput string
+	shortVersion  bool
+)
+
 var versionCmd = &cobra.Command{
 	Use:   "version",
 	Short: "Print the version of minikube",
 	Long:  `Print the version of minikube.`,
 	Run: func(command *cobra.Command, args []string) {
-		out.Ln("minikube version: %v", version.GetVersion())
+		minikubeVersion := version.GetVersion()
 		gitCommitID := version.GetGitCommitID()
-		if gitCommitID != "" {
-			out.Ln("commit: %v", gitCommitID)
+		data := map[string]string{
+			"minikubeVersion": minikubeVersion,
+			"commit":          gitCommitID,
 		}
+		switch versionOutput {
+		case "":
+			out.Ln("minikube version: %v", minikubeVersion)
+			if !shortVersion && gitCommitID != "" {
+				out.Ln("commit: %v", gitCommitID)
+			}
+		case "json":
+			json, err := json.Marshal(data)
+			if err != nil {
+				exit.WithError("version json failure", err)
+			}
+			out.Ln(string(json))
+		case "yaml":
+			yaml, err := yaml.Marshal(data)
+			if err != nil {
+				exit.WithError("version yaml failure", err)
+			}
+			out.Ln(string(yaml))
+		default:
+			exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'")
+		}
 	},
 }
 
+func init() {
+	versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
+	versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
+}
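Both new output modes marshal the same data map, so their shapes are easy to preview. A small sketch using the same libraries the diff imports; the version and commit values are placeholders:

    package main

    import (
        "encoding/json"
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        data := map[string]string{
            "minikubeVersion": "v1.9.1",     // placeholder value
            "commit":          "cd81888a22", // placeholder value
        }
        j, _ := json.Marshal(data) // errors ignored only for this sketch
        y, _ := yaml.Marshal(data)
        fmt.Println(string(j)) // {"commit":"cd81888a22","minikubeVersion":"v1.9.1"}
        fmt.Print(string(y))   // commit: cd81888a22 / minikubeVersion: v1.9.1
    }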
@@ -64,6 +64,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -73,6 +73,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
@@ -68,4 +68,17 @@ networking:
   dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}
   podSubnet: "{{.PodSubnet }}"
   serviceSubnet: {{.ServiceCIDR}}
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: {{.AdvertiseAddress}}:10249
 `))
@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -44,6 +44,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -37,6 +37,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -41,6 +41,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -44,6 +44,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -36,6 +36,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -37,6 +37,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"

@@ -41,6 +41,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -51,6 +51,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -42,6 +42,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -48,6 +48,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -51,6 +51,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -42,6 +42,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -48,6 +48,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -51,6 +51,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -41,6 +41,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -42,6 +42,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"

@@ -48,6 +48,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
 imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: "192.168.32.0/20"
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -46,3 +46,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: 1.1.1.1
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -37,3 +37,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -43,3 +43,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: "192.168.32.0/20"
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -46,3 +46,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: 1.1.1.1
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -37,3 +37,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -43,3 +43,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: "192.168.32.0/20"
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -46,3 +46,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -36,3 +36,16 @@ networking:
   dnsDomain: 1.1.1.1
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -37,3 +37,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249

@@ -43,3 +43,16 @@ networking:
   dnsDomain: cluster.local
   podSubnet: ""
   serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: 1.1.1.1:10249
@@ -61,16 +61,16 @@ func WithCodeT(code int, format string, a ...out.V) {
 func WithError(msg string, err error) {
 	p := problem.FromError(err, runtime.GOOS)
 	if p != nil {
-		WithProblem(msg, p)
+		WithProblem(msg, err, p)
 	}
 	displayError(msg, err)
 	os.Exit(Software)
 }
 
 // WithProblem outputs info related to a known problem and exits.
-func WithProblem(msg string, p *problem.Problem) {
+func WithProblem(msg string, err error, p *problem.Problem) {
 	out.ErrT(out.Empty, "")
-	out.FatalT(msg)
+	out.ErrT(out.FailureType, "[{{.id}}] {{.msg}} {{.error}}", out.V{"msg": msg, "id": p.ID, "error": p.Err})
 	p.Display()
 	if p.ShowIssueLink {
 		out.ErrT(out.Empty, "")
@@ -38,13 +38,14 @@ func ConfigFile() string {
 
 // MiniPath returns the path to the user's minikube dir
 func MiniPath() string {
-	if os.Getenv(MinikubeHome) == "" {
+	minikubeHomeEnv := os.Getenv(MinikubeHome)
+	if minikubeHomeEnv == "" {
 		return filepath.Join(homedir.HomeDir(), ".minikube")
 	}
-	if filepath.Base(os.Getenv(MinikubeHome)) == ".minikube" {
-		return os.Getenv(MinikubeHome)
+	if filepath.Base(minikubeHomeEnv) == ".minikube" {
+		return minikubeHomeEnv
 	}
-	return filepath.Join(os.Getenv(MinikubeHome), ".minikube")
+	return filepath.Join(minikubeHomeEnv, ".minikube")
 }
 
 // MakeMiniPath is a utility to calculate a relative path to our directory.
@@ -17,10 +17,15 @@ limitations under the License.
 package localpath
 
 import (
+	"fmt"
 	"io/ioutil"
+	"os"
 	"path/filepath"
 	"runtime"
+	"strings"
 	"testing"
+
+	"k8s.io/client-go/util/homedir"
 )
 
 func TestReplaceWinDriveLetterToVolumeName(t *testing.T) {
@@ -61,3 +66,95 @@ func TestHasWindowsDriveLetter(t *testing.T) {
 		}
 	}
 }
+
+func TestMiniPath(t *testing.T) {
+	var testCases = []struct {
+		env, basePath string
+	}{
+		{"/tmp/.minikube", "/tmp/"},
+		{"/tmp/", "/tmp"},
+		{"", homedir.HomeDir()},
+	}
+	originalEnv := os.Getenv(MinikubeHome)
+	defer func() { // revert to pre-test env var
+		err := os.Setenv(MinikubeHome, originalEnv)
+		if err != nil {
+			t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv)
+		}
+	}()
+	for _, tc := range testCases {
+		t.Run(tc.env, func(t *testing.T) {
+			expectedPath := filepath.Join(tc.basePath, ".minikube")
+			os.Setenv(MinikubeHome, tc.env)
+			path := MiniPath()
+			if path != expectedPath {
+				t.Errorf("MiniPath expected to return '%s', but got '%s'", expectedPath, path)
+			}
+		})
+	}
+}
+
+func TestMachinePath(t *testing.T) {
+	var testCases = []struct {
+		miniHome []string
+		contains string
+	}{
+		{[]string{"tmp", "foo", "bar", "baz"}, "tmp"},
+		{[]string{"tmp"}, "tmp"},
+		{[]string{}, MiniPath()},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("%s", tc.miniHome), func(t *testing.T) {
+			machinePath := MachinePath("foo", tc.miniHome...)
+			if !strings.Contains(machinePath, tc.contains) {
+				t.Errorf("Function MachinePath returned (%v) which doesn't contain expected (%v)", machinePath, tc.contains)
+			}
+		})
+	}
+}
+
+type propertyFnWithArg func(string) string
+
+func TestPropertyWithNameArg(t *testing.T) {
+	var testCases = []struct {
+		propertyFunc propertyFnWithArg
+		name         string
+	}{
+		{Profile, "Profile"},
+		{ClientCert, "ClientCert"},
+		{ClientKey, "ClientKey"},
+	}
+	miniPath := MiniPath()
+	mockedName := "foo"
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) {
+				t.Errorf("Property %s(%v) doesn't contain miniPath %v", tc.name, tc.propertyFunc, miniPath)
+			}
+			if !strings.Contains(tc.propertyFunc(mockedName), mockedName) {
+				t.Errorf("Property %s(%v) doesn't contain passed name %v", tc.name, tc.propertyFunc, mockedName)
+			}
+		})
+
+	}
+}
+
+type propertyFnWithoutArg func() string
+
+func TestPropertyWithoutNameArg(t *testing.T) {
+	var testCases = []struct {
+		propertyFunc propertyFnWithoutArg
+		name         string
+	}{
+		{ConfigFile, "ConfigFile"},
+		{CACert, "CACert"},
+	}
+	miniPath := MiniPath()
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			if !strings.Contains(tc.propertyFunc(), MiniPath()) {
+				t.Errorf("Property %s(%v) doesn't contain expected miniPath %v", tc.name, tc.propertyFunc, miniPath)
+			}
+		})
+	}
+}
@@ -21,6 +21,7 @@ import (
 	"os"
 	"path"
+	"path/filepath"
 	"strings"
 	"sync"
 	"time"
 
@@ -158,36 +159,51 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager
 
 // CacheAndLoadImages caches and loads images to all profiles
 func CacheAndLoadImages(images []string) error {
 	// This is the most important thing
 	if err := image.SaveToDir(images, constants.ImageCacheDir); err != nil {
-		return err
+		return errors.Wrap(err, "save to dir")
 	}
 
 	api, err := NewAPIClient()
 	if err != nil {
-		return err
+		return errors.Wrap(err, "api")
 	}
 	defer api.Close()
 	profiles, _, err := config.ListProfiles() // need to load image to all profiles
 	if err != nil {
 		return errors.Wrap(err, "list profiles")
 	}
+
+	succeeded := []string{}
+	failed := []string{}
+
 	for _, p := range profiles { // loading images to all running profiles
 		pName := p.Name // capture the loop variable
+
 		c, err := config.Load(pName)
 		if err != nil {
-			return err
+			// Non-fatal because it may race with profile deletion
+			glog.Errorf("Failed to load profile %q: %v", pName, err)
+			failed = append(failed, pName)
+			continue
 		}
+
 		for _, n := range c.Nodes {
 			m := driver.MachineName(*c, n)
+
 			status, err := Status(api, m)
 			if err != nil {
 				glog.Warningf("skipping loading cache for profile %s", pName)
 				glog.Errorf("error getting status for %s: %v", pName, err)
-				continue // try next machine
+				failed = append(failed, pName)
+				continue
 			}
+
 			if status == state.Running.String() { // the not running hosts will load on next start
 				h, err := api.Load(m)
 				if err != nil {
-					return err
+					glog.Errorf("Failed to load machine %q: %v", m, err)
+					failed = append(failed, pName)
+					continue
 				}
 				cr, err := CommandRunner(h)
 				if err != nil {
@@ -195,12 +211,18 @@ func CacheAndLoadImages(images []string) error {
 				}
 				err = LoadImages(c, cr, images, constants.ImageCacheDir)
 				if err != nil {
+					failed = append(failed, pName)
 					glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
 				}
+				succeeded = append(succeeded, pName)
 			}
 		}
 	}
-	return err
+
+	glog.Infof("succeeded pushing to: %s", strings.Join(succeeded, " "))
+	glog.Infof("failed pushing to: %s", strings.Join(failed, " "))
+	// Live pushes are not considered a failure
+	return nil
 }
 
 // transferAndLoadImage transfers and loads a single image from the cache
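CacheAndLoadImages now records per-profile failures and keeps going instead of returning on the first error. The shape of that change, reduced to a runnable sketch; loadFor is a hypothetical stand-in for the per-profile image-load step:

    package main

    import (
        "fmt"
        "strings"
    )

    // loadFor is a hypothetical stand-in for loading cached images into one
    // profile; it fails for the "broken" profile.
    func loadFor(profile string) error {
        if profile == "broken" {
            return fmt.Errorf("boom")
        }
        return nil
    }

    // loadAll mirrors the collect-and-continue control flow in the diff:
    // a single bad profile no longer aborts the whole operation.
    func loadAll(profiles []string) {
        succeeded := []string{}
        failed := []string{}
        for _, p := range profiles {
            if err := loadFor(p); err != nil {
                // Non-fatal: record the failure and move on.
                failed = append(failed, p)
                continue
            }
            succeeded = append(succeeded, p)
        }
        fmt.Println("succeeded:", strings.Join(succeeded, " "))
        fmt.Println("failed:", strings.Join(failed, " "))
    }

    func main() {
        loadAll([]string{"minikube", "broken", "second"})
    }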
@@ -39,9 +39,8 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
 		return errors.Wrap(err, "save node")
 	}
 
-	// TODO: Start should return an error rather than calling exit!
-	Start(*cc, n, nil, false)
-	return nil
+	_, err := Start(*cc, n, nil, false)
+	return err
 }
 
 // Delete stops and deletes the given node from the given cluster
@@ -65,7 +65,7 @@ const (
 )
 
 // Start spins up a guest and starts the kubernetes node.
-func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) *kubeconfig.Settings {
+func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, apiServer bool) (*kubeconfig.Settings, error) {
 	cp := ""
 	if apiServer {
 		cp = "control plane "
@@ -100,7 +100,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 
 	sv, err := util.ParseKubernetesVersion(n.KubernetesVersion)
 	if err != nil {
-		exit.WithError("Failed to parse kubernetes version", err)
+		return nil, errors.Wrap(err, "Failed to parse kubernetes version")
 	}
 
 	// configure the runtime (docker, containerd, crio)
@@ -113,7 +113,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 	// Must be written before bootstrap, otherwise health checks may flake due to stale IP
 	kcs = setupKubeconfig(host, &cc, &n, cc.Name)
 	if err != nil {
-		exit.WithError("Failed to setup kubeconfig", err)
+		return nil, errors.Wrap(err, "Failed to setup kubeconfig")
 	}
 
 	// setup kubeadm (must come after setupKubeconfig)
@@ -125,16 +125,16 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 
 		// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
 		if err := kubeconfig.Update(kcs); err != nil {
-			exit.WithError("Failed to update kubeconfig file.", err)
+			return nil, errors.Wrap(err, "Failed to update kubeconfig file.")
 		}
 	} else {
 		bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
 		if err != nil {
-			exit.WithError("Failed to get bootstrapper", err)
+			return nil, errors.Wrap(err, "Failed to get bootstrapper")
 		}
 
 		if err = bs.SetupCerts(cc.KubernetesConfig, n); err != nil {
-			exit.WithError("setting up certs", err)
+			return nil, errors.Wrap(err, "setting up certs")
 		}
 	}
 
@@ -159,34 +159,34 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 	// Skip pre-existing, because we already waited for health
 	if viper.GetBool(waitUntilHealthy) && !preExists {
 		if err := bs.WaitForNode(cc, n, viper.GetDuration(waitTimeout)); err != nil {
-			exit.WithError("Wait failed", err)
+			return nil, errors.Wrap(err, "Wait failed")
 		}
 	}
 	} else {
 		if err := bs.UpdateNode(cc, n, cr); err != nil {
-			exit.WithError("Updating node", err)
+			return nil, errors.Wrap(err, "Updating node")
 		}
 
 		cp, err := config.PrimaryControlPlane(&cc)
 		if err != nil {
-			exit.WithError("Getting primary control plane", err)
+			return nil, errors.Wrap(err, "Getting primary control plane")
 		}
 		cpBs, err := cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, cp)
 		if err != nil {
-			exit.WithError("Getting bootstrapper", err)
+			return nil, errors.Wrap(err, "Getting bootstrapper")
 		}
 
 		joinCmd, err := cpBs.GenerateToken(cc)
 		if err != nil {
-			exit.WithError("generating join token", err)
+			return nil, errors.Wrap(err, "generating join token")
 		}
 
 		if err = bs.JoinCluster(cc, n, joinCmd); err != nil {
-			exit.WithError("joining cluster", err)
+			return nil, errors.Wrap(err, "joining cluster")
 		}
 	}
 
-	return kcs
+	return kcs, nil
 }
 
 // ConfigureRuntimes does what needs to happen to get a runtime going.
@@ -351,10 +351,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
 		return host, exists
 	}
 
-	out.T(out.FailureType, "StartHost failed again: {{.error}}", out.V{"error": err})
-	out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`,
-		out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")})
-
+	// Don't use host.Driver to avoid nil pointer deref
 	drv := cc.Driver
 	exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err)
 	return host, exists
@@ -57,7 +57,6 @@ type match struct {
 
 // Display problem metadata to the console
 func (p *Problem) Display() {
-	out.ErrT(out.FailureType, "Error: [{{.id}}] {{.error}}", out.V{"id": p.ID, "error": p.Err})
 	out.ErrT(out.Tip, "Suggestion: {{.advice}}", out.V{"advice": translate.T(p.Advice)})
 	if p.URL != "" {
 		out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": p.URL})
@@ -65,6 +64,12 @@ func (p *Problem) Display() {
 	if len(p.Issues) == 0 {
 		return
 	}
+
+	if len(p.Issues) == 1 {
+		out.ErrT(out.Issues, "Related issue: {{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, p.Issues[0])})
+		return
+	}
+
 	out.ErrT(out.Issues, "Related issues:")
 	issues := p.Issues
 	if len(issues) > 3 {
@ -44,7 +44,6 @@ func TestDisplay(t *testing.T) {
|
|||
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test")},
|
||||
description: "url, id and err",
|
||||
expected: `
|
||||
* Error: [example] test
|
||||
* Suggestion:
|
||||
* Documentation: example.com
|
||||
`,
|
||||
|
@ -53,7 +52,6 @@ func TestDisplay(t *testing.T) {
|
|||
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}, Advice: "you need a hug"},
|
||||
description: "with 2 issues and suggestion",
|
||||
expected: `
|
||||
* Error: [example] test
|
||||
* Suggestion: you need a hug
|
||||
* Documentation: example.com
|
||||
* Related issues:
|
||||
|
@ -65,7 +63,6 @@ func TestDisplay(t *testing.T) {
|
|||
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}},
|
||||
description: "with 2 issues",
|
||||
expected: `
|
||||
* Error: [example] test
|
||||
* Suggestion:
|
||||
* Documentation: example.com
|
||||
* Related issues:
|
||||
|
@ -78,7 +75,6 @@ func TestDisplay(t *testing.T) {
problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1, 2, 3, 4, 5}},
description: "with 6 issues",
expected: `
* Error: [example] test
* Suggestion:
* Documentation: example.com
* Related issues:

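The expected strings above are golden outputs in a table-driven test. A minimal sketch of the same shape, with renderProblem standing in for Problem.Display (which writes to the console rather than a buffer); everything here is illustrative, not minikube's test harness:

package problem

import (
	"bytes"
	"fmt"
	"testing"
)

// renderProblem is a buffer-writing stand-in for Problem.Display, so a
// test can diff its exact output against a golden string.
func renderProblem(w *bytes.Buffer, id, advice, url string, err error) {
	fmt.Fprintf(w, "* Error: [%s] %v\n", id, err)
	fmt.Fprintf(w, "* Suggestion: %s\n", advice)
	if url != "" {
		fmt.Fprintf(w, "* Documentation: %s\n", url)
	}
}

// TestRender mirrors the table-driven shape of TestDisplay above.
func TestRender(t *testing.T) {
	tests := []struct {
		description string
		expected    string
	}{
		{
			description: "url, id and err",
			expected:    "* Error: [example] test\n* Suggestion: \n* Documentation: example.com\n",
		},
	}
	for _, tc := range tests {
		t.Run(tc.description, func(t *testing.T) {
			var buf bytes.Buffer
			renderProblem(&buf, "example", "", "example.com", fmt.Errorf("test"))
			if got := buf.String(); got != tc.expected {
				t.Errorf("got %q, want %q", got, tc.expected)
			}
		})
	}
}
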
@ -7,7 +7,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "",
"'{{.driver}}' driver reported an issue: {{.error}}": "",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -147,6 +146,7 @@
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Fehler: Sie haben Kubernetes v{{.new}} ausgewählt, aber auf dem vorhandenen Cluster für Ihr Profil wird Kubernetes v{{.old}} ausgeführt. Zerstörungsfreie Downgrades werden nicht unterstützt. Sie können jedoch mit einer der folgenden Optionen fortfahren:\n* Erstellen Sie den Cluster mit Kubernetes v{{.new}} neu: Führen Sie \"minikube delete {{.profile}}\" und dann \"minikube start {{.profile}} - kubernetes-version = {{.new}}\" aus.\n* Erstellen Sie einen zweiten Cluster mit Kubernetes v{{.new}}: Führen Sie \"minikube start -p \u003cnew name\u003e --kubernetes-version = {{.new}}\" aus.\n* Verwenden Sie den vorhandenen Cluster mit Kubernetes v {{.old}} oder höher: Führen Sie \"minikube start {{.profile}} --kubernetes-version = {{.old}}\" aus.",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "Wird beendet",
"Exiting.": "",

@ -184,6 +184,7 @@
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -269,7 +270,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -331,7 +331,8 @@
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "Die angeforderte Speicherzuweisung {{.requested_size}} liegt unter dem zulässigen Mindestwert von {{.minimum_size}}.",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "",
"Retrieve the ssh identity key path of the specified cluster.": "",
"Retrieves the IP address of the running cluster": "",

@ -362,6 +363,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "Leider wird der Parameter kubeadm.{{.parameter_name}} momentan von --extra-config nicht unterstützt.",

@ -376,6 +378,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "Startet einen lokalen Kubernetes-Cluster",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",

@ -441,7 +444,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -465,6 +467,7 @@
"This will keep the existing kubectl context and will create a minikube context.": "Dadurch wird der vorhandene Kubectl-Kontext beibehalten und ein minikube-Kontext erstellt.",
"This will start the mount daemon and automatically mount files into minikube": "Dadurch wird der Mount-Daemon gestartet und die Dateien werden automatisch in minikube geladen",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Tipp: Um diesen Root-Cluster zu entfernen, führen Sie Folgendes aus: sudo {{.cmd}} delete",
"To connect to this cluster, use: kubectl --context={{.name}}": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}",

@ -473,6 +476,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Möglicherweise müssen Sie Kubectl- oder minikube-Befehle verschieben, um sie als eigenen Nutzer zu verwenden. Um beispielsweise Ihre eigenen Einstellungen zu überschreiben, führen Sie aus:",

@ -506,7 +510,6 @@
"Unable to pull images, which may be OK: {{.error}}": "Bilder können nicht abgerufen werden, was möglicherweise kein Problem darstellt: {{.error}}",
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to stop VM": "",
"Unable to update {{.driver}} driver: {{.error}}": "",
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",

@ -542,7 +545,6 @@
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"VM driver is one of: %v": "VM-Treiber ist einer von: %v",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verify the IP address of the running cluster in kubeconfig.": "",

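The locale files in this commit map English source strings to translations; an empty value means the English text is shown unchanged. A small sketch of that lookup plus the {{.name}} substitution, assuming a plain map and string replacement rather than minikube's actual translate package and its text/template handling:

package main

import (
	"fmt"
	"strings"
)

// translations mimics two entries of a locale file above: keys are the
// English source strings, values the translations; "" means untranslated.
var translations = map[string]string{
	"Exiting":  "Wird beendet",
	"Examples": "",
}

// T falls back to the English key when no translation exists, which is
// why empty values in these files are harmless.
func T(s string) string {
	if t := translations[s]; t != "" {
		return t
	}
	return s
}

// applyTemplate is a toy stand-in for the {{.name}} substitution that
// the real code performs with text/template.
func applyTemplate(msg string, vars map[string]string) string {
	for k, v := range vars {
		msg = strings.ReplaceAll(msg, "{{."+k+"}}", v)
	}
	return msg
}

func main() {
	fmt.Println(T("Exiting"))  // Wird beendet
	fmt.Println(T("Examples")) // Examples (falls back to English)
	fmt.Println(applyTemplate(T("- {{.profile}}"), map[string]string{"profile": "minikube"}))
}
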
@ -8,7 +8,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "",
"'{{.driver}}' driver reported an issue: {{.error}}": "",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -148,6 +147,7 @@
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Error: Has seleccionado Kubernetes {{.new}}, pero el clúster de tu perfil utiliza la versión {{.old}}. No se puede cambiar a una versión inferior sin eliminar todos los datos y recursos pertinentes, pero dispones de las siguientes opciones para continuar con la operación:\n* Volver a crear el clúster con Kubernetes {{.new}}: ejecuta \"minikube delete {{.profile}}\" y, luego, \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Crear un segundo clúster con Kubernetes {{.new}}: ejecuta \"minikube start -p \u003cnuevo nombre\u003e --kubernetes-version={{.new}}\"\n* Reutilizar el clúster actual con Kubernetes {{.old}} o una versión posterior: ejecuta \"minikube start {{.profile}} --kubernetes-version={{.old}}",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "Saliendo",
"Exiting.": "",

@ -185,6 +185,7 @@
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -270,7 +271,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -332,7 +332,8 @@
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "El valor de la asignación de memoria de {{.requested_size}} solicitada es inferior al valor mínimo de {{.minimum_size}}",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "",
"Retrieve the ssh identity key path of the specified cluster.": "",
"Retrieves the IP address of the running cluster": "",

@ -363,6 +364,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "De momento, --extra-config no admite el parámetro kubeadm.{{.parameter_name}}",

@ -377,6 +379,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "Inicia un clúster de Kubernetes local",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",

@ -442,7 +445,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -466,6 +468,7 @@
"This will keep the existing kubectl context and will create a minikube context.": "Se conservará el contexto de kubectl actual y se creará uno de minikube.",
"This will start the mount daemon and automatically mount files into minikube": "Se iniciará el daemon de activación y se activarán automáticamente los archivos en minikube",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Para eliminar este clúster de raíz, ejecuta: sudo {{.cmd}} delete",
"To connect to this cluster, use: kubectl --context={{.name}}": "Para conectarte a este clúster, usa: kubectl --context={{.name}}",

@ -474,6 +477,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Para usar comandos de kubectl o minikube como tu propio usuario, puede que debas reubicarlos. Por ejemplo, para sobrescribir tu configuración, ejecuta:",

@ -507,7 +511,6 @@
"Unable to pull images, which may be OK: {{.error}}": "No se ha podido recuperar imágenes, que podrían estar en buen estado: {{.error}}",
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to stop VM": "",
"Unable to update {{.driver}} driver: {{.error}}": "",
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",

@ -543,7 +546,6 @@
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"VM driver is one of: %v": "El controlador de la VM es uno de los siguientes: %v",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verify the IP address of the running cluster in kubeconfig.": "",

@ -7,7 +7,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "",
"'{{.driver}}' driver reported an issue: {{.error}}": "",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -84,6 +83,7 @@
"Display dashboard URL instead of opening a browser": "",
"Display the kubernetes addons URL in the CLI instead of opening it in the default browser": "",
"Display the kubernetes service URL in the CLI instead of opening it in the default browser": "",
"Display values currently set in the minikube config file": "",
"Display values currently set in the minikube config file.": "",
"Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "",
"Docs have been saved at - {{.path}}": "",

@ -144,6 +144,7 @@
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "Fermeture…",
"Exiting.": "",

@ -181,6 +182,7 @@
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -266,7 +268,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "Le noeud \"{{.node_name}}\" est arrêté.",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -329,7 +330,8 @@
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "L'allocation de mémoire demandée ({{.requested_size}}) est inférieure au minimum autorisé ({{.minimum_size}}).",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "",
"Retrieve the ssh identity key path of the specified cluster.": "",
"Retrieves the IP address of the running cluster": "",

@ -360,6 +362,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "Désolé, le paramètre kubeadm.{{.parameter_name}} ne peut actuellement pas être utilisé avec \"--extra-config\".",

@ -374,6 +377,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "Démarre un cluster Kubernetes local.",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",

@ -437,7 +441,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -461,6 +464,7 @@
"This will keep the existing kubectl context and will create a minikube context.": "Cela permet de conserver le contexte kubectl existent et de créer un contexte minikube.",
"This will start the mount daemon and automatically mount files into minikube": "Cela permet de lancer le daemon d'installation et d'installer automatiquement les fichiers dans minikube.",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Conseil : Pour supprimer ce cluster appartenant à la racine, exécutez la commande \"sudo {{.cmd}} delete\".",
"To connect to this cluster, use: kubectl --context={{.name}}": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".",

@ -469,6 +473,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Pour utiliser les commandes kubectl ou minikube sous votre propre nom d'utilisateur, vous devrez peut-être les déplacer. Par exemple, pour écraser vos propres paramètres, exécutez la commande suivante :",

@ -502,7 +507,6 @@
"Unable to pull images, which may be OK: {{.error}}": "Impossible d'extraire des images, qui sont peut-être au bon format : {{.error}}",
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to stop VM": "",
"Unable to update {{.driver}} driver: {{.error}}": "",
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",

@ -538,7 +542,6 @@
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"VM driver is one of: %v": "Le pilote de la VM appartient à : %v",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verify the IP address of the running cluster in kubeconfig.": "",

@ -663,4 +666,4 @@
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} sur {{.platform}}",
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
"{{.url}} is not accessible: {{.error}}": ""
}
}

@ -13,7 +13,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "「none」ドライバーは「minikube ssh」コマンドをサポートしていません",
"'{{.driver}}' driver reported an issue: {{.error}}": "「{{.driver}}」ドライバーがエラーを報告しました: {{.error}}",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -153,6 +152,7 @@
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "エラー: Kubernetes v{{.new}} が選択されましたが、使用しているプロファイルの既存クラスタで実行されているのは Kubernetes v{{.old}} です。非破壊的なダウングレードはサポートされていませんが、以下のいずれかの方法で続行できます。\n* Kubernetes v{{.new}} を使用してクラスタを再作成する: 「minikube delete {{.profile}}」を実行してから、「minikube start {{.profile}} --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.new}} を使用して 2 つ目のクラスタを作成する: 「minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.old}} 以降を使用して既存のクラスタを再利用する: 「minikube start {{.profile}} --kubernetes-version={{.old}}」を実行します。",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "終了しています",
"Exiting.": "終了しています。",

@ -190,6 +190,7 @@
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -275,7 +276,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -337,7 +337,8 @@
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "リクエストされたメモリ割り当て {{.requested_size}} が許可される最小値 {{.minimum_size}} 未満です",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "",
"Retrieve the ssh identity key path of the specified cluster.": "",
"Retrieves the IP address of the running cluster": "",

@ -368,6 +369,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "申し訳ありません。現在、kubeadm.{{.parameter_name}} パラメータは --extra-config でサポートされていません",

@ -382,6 +384,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "ローカルの Kubernetes クラスタを起動します",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",

@ -447,7 +450,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -471,6 +473,7 @@
"This will keep the existing kubectl context and will create a minikube context.": "これにより既存の kubectl コンテキストが保持され、minikube コンテキストが作成されます。",
"This will start the mount daemon and automatically mount files into minikube": "これによりマウント デーモンが起動し、ファイルが minikube に自動的にマウントされます",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "ヒント: この root 所有のクラスタを削除するには、「sudo {{.cmd}} delete」を実行します",
"To connect to this cluster, use: kubectl --context={{.name}}": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します",

@ -479,6 +482,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "kubectl か minikube コマンドを独自のユーザーとして使用するには、そのコマンドの再配置が必要な場合があります。たとえば、独自の設定を上書きするには、以下を実行します。",

@ -512,7 +516,6 @@
"Unable to pull images, which may be OK: {{.error}}": "イメージを pull できませんが、問題ありません。{{.error}}",
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to stop VM": "",
"Unable to update {{.driver}} driver: {{.error}}": "",
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",

@ -548,7 +551,6 @@
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"VM driver is one of: %v": "VM ドライバは次のいずれかです。%v",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verify the IP address of the running cluster in kubeconfig.": "",

@ -12,7 +12,6 @@
"'none' driver does not support 'minikube ssh' command": "'none' 드라이버는 'minikube ssh' 커맨드를 지원하지 않습니다",
"'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 드라이버가 다음 이슈를 기록하였습니다: {{.error}}",
"'{{.profile}}' is not running": "'{{.profile}}' 이 실행 중이지 않습니다",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -154,6 +153,7 @@
"Error writing mount pid": "",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "예시",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "",
"Exiting.": "",

@ -193,6 +193,7 @@
"Failed to stop node {{.name}}": "노드 {{.name}} 중지에 실패하였습니다",
"Failed to update cluster": "클러스터를 수정하는 데 실패하였습니다",
"Failed to update config": "컨피그를 수정하는 데 실패하였습니다",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "마운트 해제에 실패하였습니다: {{.error}}",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -275,7 +276,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -332,7 +332,8 @@
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "",
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "",
"Retrieve the ssh identity key path of the specified cluster.": "",
"Retrieves the IP address of the running cluster": "",

@ -363,6 +364,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "죄송합니다, 쿠버네티스 {{.version}} 는 해당 minikube 버전에서 지원하지 않습니다",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "",

@ -378,6 +380,7 @@
"StartHost failed, but will try again: {{.error}}": "",
"Starting node": "노드를 시작하는 중",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다",
"Starts a node.": "노드를 시작합니다",
"Starts an existing stopped node in a cluster.": "클러스터의 중지된 노드를 시작합니다",

@ -434,7 +437,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -455,12 +457,14 @@
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
"This will keep the existing kubectl context and will create a minikube context.": "",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"To connect to this cluster, use: kubectl --context={{.name}}": "",
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "",

@ -494,7 +498,6 @@
"Unable to remove machine directory": "",
"Unable to remove machine directory: %v": "머신 디렉토리를 제거할 수 없습니다: %v",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "가상 머신을 시작할 수 없습니다. 확인 후 가능하면 'minikube delete' 를 실행하세요",
"Unable to stop VM": "가상 머신을 중지할 수 없습니다",
"Unable to update {{.driver}} driver: {{.error}}": "{{.driver}} 를 수정할 수 없습니다: {{.error}}",

@ -529,7 +532,6 @@
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
"Using the {{.driver}} driver based on existing profile": "기존 프로필에 기반하여 {{.driver}} 드라이버를 사용하는 중",
"Using the {{.driver}} driver based on user configuration": "유저 환경 설정 정보에 기반하여 {{.driver}} 드라이버를 사용하는 중",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verify the IP address of the running cluster in kubeconfig.": "",

@ -12,7 +12,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "sterownik 'none' nie wspiera komendy 'minikube ssh'",
"'{{.driver}}' driver reported an issue: {{.error}}": "",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",

@ -153,6 +152,7 @@
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".",
"Error: [{{.id}}] {{.error}}": "",
"Examples": "Przykłady",
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
"Exiting": "",
"Exiting.": "",

@ -190,6 +190,7 @@
"Failed to stop node {{.name}}": "",
"Failed to update cluster": "Aktualizacja klastra nie powiodła się",
"Failed to update config": "Aktualizacja konfiguracji nie powiodła się",
"Failed to update kubeconfig file.": "",
"Failed unmount: {{.error}}": "",
"File permissions used for the mount": "",
"Filter to use only VM Drivers": "",

@ -274,7 +275,6 @@
"Networking and Connectivity Commands:": "",
"No minikube profile was found. You can create one using `minikube start`.": "",
"Node \"{{.node_name}}\" stopped.": "",
"Node may be unable to resolve external DNS records": "",
"Node operations": "",
"Node {{.name}} was successfully deleted.": "",
"Node {{.nodeName}} does not exist.": "",

@ -335,7 +335,8 @@
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "",
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra",
"Retrieve the ssh identity key path of the specified cluster.": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra.",
"Retrieves the IP address of the running cluster": "Pobiera adres IP aktualnie uruchomionego klastra",

@ -366,6 +367,7 @@
"Show only log entries which point to known problems": "Pokaż logi które wskazują na znane problemy",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "Zignorowano zmianę kontekstu kubectl ponieważ --keep-context zostało przekazane",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "",

@ -380,6 +382,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "Uruchamianie lokalnego klastra kubernetesa",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",

@ -443,7 +446,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "",

@ -465,6 +467,7 @@
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
"This will keep the existing kubectl context and will create a minikube context.": "",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"To connect to this cluster, use: kubectl --context={{.name}}": "Aby połączyć się z klastrem użyj: kubectl --context={{.name}}",
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "Aby połaczyć się z klastem uzyj: kubectl --context={{.profile_name}}",

@ -472,6 +475,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "Aby uruchomić minikube z HyperV Powershell musi znajdować się w zmiennej PATH",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "",

@ -504,7 +508,6 @@
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM": "Nie można uruchomić maszyny wirtualnej",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to stop VM": "Nie można zatrzymać maszyny wirtualnej",
"Unable to update {{.driver}} driver: {{.error}}": "",
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",

@ -539,7 +542,6 @@
"Using the {{.driver}} driver based on existing profile": "",
"Using the {{.driver}} driver based on user configuration": "",
"VM driver is one of: %v": "Sterownik wirtualnej maszyny to jeden z: %v",
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "Weryfikuję czy zmienne HTTP_PROXY i HTTPS_PROXY sa ustawione poprawnie",
"Verify the IP address of the running cluster in kubeconfig.": "Weryfikuję adres IP działającego klastra w kubeconfig",

@ -13,7 +13,6 @@
"'none' driver does not support 'minikube podman-env' command": "",
"'none' driver does not support 'minikube ssh' command": "'none' 驱动不支持 'minikube ssh' 命令",
"'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 驱动程序报告了一个问题: {{.error}}",
"- {{.profile}}": "",
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "VPN 或者防火墙正在干扰对 minikube 虚拟机的 HTTP 访问。或者,您可以使用其它的虚拟机驱动:https://minikube.sigs.k8s.io/docs/start/",
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网。您可能需要对其进行配置为使用代理",
"A firewall is blocking Docker within the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网,您可能需要对其进行配置为使用代理",

@ -193,6 +192,7 @@
|
|||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "错误:您已选择 Kubernetes v{{.new}},但您的配置文件的现有集群正在运行 Kubernetes v{{.old}}。非破坏性降级不受支持,但若要继续操作,您可以执行以下选项之一:\n* 使用 Kubernetes v{{.new}} 重新创建现有集群:运行“minikube delete {{.profile}}”,然后运行“minikube start {{.profile}} --kubernetes-version={{.new}}”\n* 使用 Kubernetes v{{.new}} 再创建一个集群:运行“minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}”\n* 通过 Kubernetes v{{.old}} 或更高版本重复使用现有集群:运行“minikube start {{.profile}} --kubernetes-version={{.old}}”",
|
||||
"Error: [{{.id}}] {{.error}}": "错误:[{{.id}}] {{.error}}",
|
||||
"Examples": "示例",
|
||||
"Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "正在退出",
|
||||
"Exiting due to driver incompatibility": "由于驱动程序不兼容而退出",
|
||||
|
@ -235,6 +235,7 @@
|
|||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "更新 cluster 失败",
|
||||
"Failed to update config": "更新 config 失败",
|
||||
"Failed to update kubeconfig file.": "",
|
||||
"Failed unmount: {{.error}}": "unmount 失败:{{.error}}",
|
||||
"File permissions used for the mount": "用于 mount 的文件权限",
|
||||
"Filter to use only VM Drivers": "",
|
||||
|
@ -323,7 +324,6 @@
|
|||
"Networking and Connectivity Commands:": "网络和连接命令:",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
|
@ -387,7 +387,8 @@
|
|||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "请求的内存分配 {{.requested_size}} 小于允许的 {{.minimum_size}} 最小值",
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
"Restarting the {{.name}} service may improve performance.": "",
"Retrieve the ssh identity key path of the specified cluster": "检索指定集群的 ssh 密钥路径",
"Retrieve the ssh identity key path of the specified cluster.": "检索指定集群的 ssh 密钥路径。",
"Retrieves the IP address of the running cluster": "检索正在运行的群集的 IP 地址",
@ -420,6 +421,7 @@
"Show only log entries which point to known problems": "",
"Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "",
"Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "",
"Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "",
"Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "",
"Sorry, completion support is not yet implemented for {{.name}}": "",
"Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "抱歉,--extra-config 目前不支持 kubeadm.{{.parameter_name}} 参数",
@ -434,6 +436,7 @@
"StartHost failed again: {{.error}}": "",
"StartHost failed, but will try again: {{.error}}": "",
"Starting tunnel for service {{.service}}.": "",
"Starting {{.controlPlane}}node {{.name}} in cluster {{.cluster}}": "",
"Starts a local kubernetes cluster": "启动本地 kubernetes 集群",
"Starts a node.": "",
"Starts an existing stopped node in a cluster.": "",
@ -503,7 +506,6 @@
"The node to get logs from. Defaults to the primary control plane.": "",
"The node to ssh into. Defaults to the primary control plane.": "",
"The none driver is not compatible with multi-node clusters.": "",
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
"The number of bytes to use for 9p packet payload": "",
"The number of nodes to spin up. Defaults to 1.": "",
"The output format. One of 'json', 'table'": "输出的格式。'json' 或者 'table'",
@ -527,6 +529,7 @@
"This will keep the existing kubectl context and will create a minikube context.": "这将保留现有 kubectl 上下文并创建 minikube 上下文。",
"This will start the mount daemon and automatically mount files into minikube": "这将启动装载守护进程并将文件自动装载到 minikube 中",
"This will start the mount daemon and automatically mount files into minikube.": "",
"This {{.type}} is having trouble accessing https://{{.repository}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "提示:要移除这个由根用户拥有的集群,请运行 sudo {{.cmd}} delete",
"To connect to this cluster, use: kubectl --context={{.name}}": "如需连接到此集群,请使用 kubectl --context={{.name}}",
@ -535,6 +538,7 @@
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
"To fix this, run: {{.command}}": "",
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
"To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "",
"To see addons list for other profiles use: `minikube addons -p name list`": "",
"To start minikube with HyperV Powershell must be in your PATH`": "",
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "如需以您自己的用户身份使用 kubectl 或 minikube 命令,您可能需要重新定位该命令。例如,如需覆盖您的自定义设置,请运行:",
@ -570,7 +574,6 @@
"Unable to pull images, which may be OK: {{.error}}": "无法拉取镜像,有可能是正常状况:{{.error}}",
"Unable to remove machine directory": "",
"Unable to restart cluster, will reset it: {{.error}}": "",
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "无法启动虚拟机。可能的话请检查后执行 'minikube delete'",
"Unable to stop VM": "无法停止虚拟机",
"Unable to update {{.driver}} driver: {{.error}}": "",