Merge branch 'master' of github.com:kubernetes/minikube into improve-localpath-test-coverage
commit
83504badbb
|
|
@ -81,13 +81,27 @@ jobs:
|
|||
GOPOGH_RESULT: ""
|
||||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-16.04
|
||||
steps:
|
||||
steps:
|
||||
- name: Install kubectl
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
|
||||
sudo install kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client=true
|
||||
- name: Docker Info
|
||||
shell: bash
|
||||
run: |
|
||||
docker info || true
|
||||
echo "--------------------------"
|
||||
docker version || true
|
||||
echo "--------------------------"
|
||||
docker info || true
|
||||
echo "--------------------------"
|
||||
docker system df || true
|
||||
echo "--------------------------"
|
||||
docker system info || true
|
||||
echo "--------------------------"
|
||||
docker ps || true
|
||||
echo "--------------------------"
|
||||
- name: Install lz4
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -157,6 +171,12 @@ jobs:
|
|||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
needs: [build_minikube]
|
||||
steps:
|
||||
- name: Install kubectl
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
|
||||
sudo install kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client=true
|
||||
- name: Install lz4
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -165,9 +185,17 @@ jobs:
|
|||
- name: Docker Info
|
||||
shell: bash
|
||||
run: |
|
||||
docker info || true
|
||||
echo "--------------------------"
|
||||
docker version || true
|
||||
echo "--------------------------"
|
||||
docker info || true
|
||||
echo "--------------------------"
|
||||
docker system df || true
|
||||
echo "--------------------------"
|
||||
docker system info || true
|
||||
echo "--------------------------"
|
||||
docker ps || true
|
||||
echo "--------------------------"
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -232,12 +260,23 @@ jobs:
|
|||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-16.04
|
||||
steps:
|
||||
- name: Install kubectl
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
|
||||
sudo install kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client=true
|
||||
# conntrack is required for kubernetes 1.18 and higher
|
||||
- name: Install conntrack
|
||||
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
|
||||
- name: Install tools for none
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install conntrack
|
||||
sudo apt-get -qq -y install socat
|
||||
VERSION="v1.17.0"
|
||||
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
|
||||
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -302,12 +341,23 @@ jobs:
|
|||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Install kubectl
|
||||
shell: bash
|
||||
run: |
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
|
||||
sudo install kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client=true
|
||||
# conntrack is required for kubernetes 1.18 and higher
|
||||
- name: Install conntrack
|
||||
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
|
||||
- name: Install tools for none
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install conntrack
|
||||
sudo apt-get -qq -y install socat
|
||||
VERSION="v1.17.0"
|
||||
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
|
||||
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
|
||||
- name: Install gopogh
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -372,11 +422,12 @@ jobs:
|
|||
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- name: Install lz4
|
||||
- name: Install kubectl
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install liblz4-tool
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
|
||||
sudo install kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client=true
|
||||
- name: Install podman
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
|
|||
34
CHANGELOG.md
34
CHANGELOG.md
|
|
@ -1,5 +1,39 @@
|
|||
# Release Notes
|
||||
|
||||
## Version 1.9.0- 2020-03-26
|
||||
|
||||
New features & improvements
|
||||
|
||||
* Update DefaultKubernetesVersion to v1.18.0 [#7235](https://github.com/kubernetes/minikube/pull/7235)
|
||||
* Add --vm flag for users who want to autoselect only VM's [#7068](https://github.com/kubernetes/minikube/pull/7068)
|
||||
* Add 'stable' and 'latest' as valid kubernetes-version values [#7212](https://github.com/kubernetes/minikube/pull/7212)
|
||||
|
||||
* gpu addon: privileged mode no longer required [#7149](https://github.com/kubernetes/minikube/pull/7149)
|
||||
* Add sch_tbf and extend filter ipset kernel module for bandwidth shaping [#7255](https://github.com/kubernetes/minikube/pull/7255)
|
||||
* Parse --disk-size and --memory sizes with binary suffixes [#7206](https://github.com/kubernetes/minikube/pull/7206)
|
||||
|
||||
|
||||
Bug Fixes
|
||||
|
||||
* Re-initalize failed Kubernetes clusters [#7234](https://github.com/kubernetes/minikube/pull/7234)
|
||||
* do not override hostname if extraConfig is specified [#7238](https://github.com/kubernetes/minikube/pull/7238)
|
||||
* Enable HW_RANDOM_VIRTIO to fix sshd startup delays [#7208](https://github.com/kubernetes/minikube/pull/7208)
|
||||
* hyperv Delete: call StopHost before removing VM [#7160](https://github.com/kubernetes/minikube/pull/7160)
|
||||
|
||||
Huge thank you for this release towards our contributors:
|
||||
|
||||
- Anders F Björklund
|
||||
- Medya Ghazizadeh
|
||||
- Priya Wadhwa
|
||||
- Sharif Elgamal
|
||||
- Thomas Strömberg
|
||||
- Tom
|
||||
- Vincent Link
|
||||
- Yang Keao
|
||||
- Zhongcheng Lao
|
||||
- vikkyomkar
|
||||
|
||||
|
||||
## Version 1.9.0-beta.2 - 2020-03-21
|
||||
|
||||
New features & improvements
|
||||
|
|
|
|||
4
Makefile
4
Makefile
|
|
@ -15,7 +15,7 @@
|
|||
# Bump these on release - and please check ISO_VERSION for correctness.
|
||||
VERSION_MAJOR ?= 1
|
||||
VERSION_MINOR ?= 9
|
||||
VERSION_BUILD ?= 0-beta.2
|
||||
VERSION_BUILD ?= 0
|
||||
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
|
||||
VERSION ?= v$(RAW_VERSION)
|
||||
|
||||
|
|
@ -625,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver
|
|||
gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/
|
||||
gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/
|
||||
|
||||
site/themes/docsy/assets/vendor/bootstrap/package.js:
|
||||
site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule
|
||||
git submodule update -f --init --recursive
|
||||
|
||||
out/hugo/hugo:
|
||||
|
|
|
|||
|
|
@ -200,6 +200,7 @@ func initDriverFlags() {
|
|||
startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers()))
|
||||
startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.")
|
||||
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors")
|
||||
startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers")
|
||||
|
||||
// kvm2
|
||||
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)")
|
||||
|
|
@ -507,7 +508,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
|
|||
return ds
|
||||
}
|
||||
|
||||
pick, alts := driver.Suggest(driver.Choices())
|
||||
pick, alts := driver.Suggest(driver.Choices(viper.GetBool("vm")))
|
||||
if pick.Name == "" {
|
||||
exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
|
|
@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{
|
|||
for _, n := range cc.Nodes {
|
||||
glog.Infof("checking status of %s ...", n.Name)
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
st, err = status(api, machineName, n.ControlPlane)
|
||||
st, err = status(api, *cc, n)
|
||||
glog.Infof("%s status: %+v", machineName, st)
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -150,12 +151,12 @@ func exitCode(st *Status) int {
|
|||
return c
|
||||
}
|
||||
|
||||
func status(api libmachine.API, name string, controlPlane bool) (*Status, error) {
|
||||
func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
|
||||
|
||||
profile, node := driver.ClusterNameFromMachine(name)
|
||||
controlPlane := n.ControlPlane
|
||||
|
||||
st := &Status{
|
||||
Name: node,
|
||||
Name: n.Name,
|
||||
Host: Nonexistent,
|
||||
APIServer: Nonexistent,
|
||||
Kubelet: Nonexistent,
|
||||
|
|
@ -163,6 +164,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error)
|
|||
Worker: !controlPlane,
|
||||
}
|
||||
|
||||
name := driver.MachineName(cc, n)
|
||||
hs, err := machine.Status(api, name)
|
||||
glog.Infof("%s host status = %q (err=%v)", name, hs, err)
|
||||
if err != nil {
|
||||
|
|
@ -205,7 +207,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error)
|
|||
}
|
||||
|
||||
if st.Kubeconfig != Irrelevant {
|
||||
ok, err := kubeconfig.IsClusterInConfig(ip, profile)
|
||||
ok, err := kubeconfig.IsClusterInConfig(ip, cc.Name)
|
||||
glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
|
||||
if ok {
|
||||
st.Kubeconfig = Configured
|
||||
|
|
|
|||
|
|
@ -69,8 +69,9 @@ func runStop(cmd *cobra.Command, args []string) {
|
|||
|
||||
func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool {
|
||||
nonexistent := false
|
||||
stop := func() (err error) {
|
||||
machineName := driver.MachineName(cluster, n)
|
||||
machineName := driver.MachineName(cluster, n)
|
||||
|
||||
tryStop := func() (err error) {
|
||||
err = machine.StopHost(api, machineName)
|
||||
if err == nil {
|
||||
return nil
|
||||
|
|
@ -87,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
|
|||
}
|
||||
}
|
||||
|
||||
if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil {
|
||||
if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil {
|
||||
exit.WithError("Unable to stop VM", err)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
# CONFIG_LOCALVERSION_AUTO is not set
|
||||
CONFIG_KERNEL_LZ4=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_AUDIT=y
|
||||
|
|
@ -25,10 +26,10 @@ CONFIG_CPUSETS=y
|
|||
CONFIG_CGROUP_DEVICE=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_CGROUP_PERF=y
|
||||
CONFIG_CGROUP_BPF=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_BPF_SYSCALL=y
|
||||
CONFIG_CGROUP_BPF=y
|
||||
# CONFIG_COMPAT_BRK is not set
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_SMP=y
|
||||
|
|
@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m
|
|||
CONFIG_BRIDGE_EBT_NFLOG=m
|
||||
CONFIG_BRIDGE=m
|
||||
CONFIG_NET_SCHED=y
|
||||
CONFIG_NET_SCH_TBF=y
|
||||
CONFIG_NET_SCH_NETEM=y
|
||||
CONFIG_NET_SCH_INGRESS=m
|
||||
CONFIG_NET_CLS_U32=m
|
||||
CONFIG_NET_CLS_CGROUP=y
|
||||
CONFIG_NET_CLS_BPF=m
|
||||
CONFIG_NET_EMATCH=y
|
||||
CONFIG_NET_EMATCH_IPSET=y
|
||||
CONFIG_NET_CLS_ACT=y
|
||||
CONFIG_NET_ACT_MIRRED=m
|
||||
CONFIG_NET_ACT_BPF=m
|
||||
|
|
|
|||
|
|
@ -1,4 +1,12 @@
|
|||
[
|
||||
{
|
||||
"name": "v1.9.0",
|
||||
"checksums": {
|
||||
"darwin": "2a074b0d842e3d9272444990374c6ffc51878c2d11c0434f54e15269b59593f9",
|
||||
"linux": "81d77d1babe63be393e0a3204aac7825eb35e0fdf58ffefd9f66508a43864866",
|
||||
"windows": "d11a957704c23670eac453a47897449a2aaab13b7dcd6424307f8932ac9f81bb"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "v1.8.2",
|
||||
"checksums": {
|
||||
|
|
|
|||
26
go.sum
26
go.sum
|
|
@ -49,8 +49,10 @@ github.com/Parallels/docker-machine-parallels v1.3.0 h1:RG1fyf3v1GwXMCeHRiZkB4tL
|
|||
github.com/Parallels/docker-machine-parallels v1.3.0/go.mod h1:HCOMm3Hulq/xuEVQMyZOuQlA+dSZpFY5kdCTZWjMVis=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
|
|
@ -107,6 +109,7 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe
|
|||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
|
||||
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
|
||||
|
|
@ -156,6 +159,7 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c=
|
||||
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
|
|
@ -197,7 +201,9 @@ github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6
|
|||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
|
|
@ -206,6 +212,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
|
|||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
|
|
@ -233,11 +240,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
|
|||
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
|
|
@ -251,6 +260,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA
|
|||
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
|
||||
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
|
|
@ -260,6 +270,7 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
|
|||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
|
|
@ -329,6 +340,7 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO
|
|||
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
|
||||
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
|
@ -372,6 +384,7 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY
|
|||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
|
||||
github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
|
|
@ -421,10 +434,9 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
|
|||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
|
||||
github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0=
|
||||
github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU=
|
||||
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0=
|
||||
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU=
|
||||
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
|
|
@ -478,7 +490,9 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE
|
|||
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
|
||||
github.com/libvirt/libvirt-go v3.4.0+incompatible h1:Cpyalgj1x8JIeTlL6SDYZBo7j8nY3+5XHqmi8DaunCk=
|
||||
github.com/libvirt/libvirt-go v3.4.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
|
||||
|
|
@ -500,6 +514,7 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN
|
|||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
|
|
@ -561,6 +576,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m
|
|||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
||||
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
|
|
@ -606,6 +622,7 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP
|
|||
github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
|
||||
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
|
||||
|
|
@ -1025,11 +1042,13 @@ honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0=
|
||||
k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0=
|
||||
k8s.io/apiextensions-apiserver v0.17.3 h1:WDZWkPcbgvchEdDd7ysL21GGPx3UKZQLDZXEkevT6n4=
|
||||
k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY=
|
||||
k8s.io/apimachinery v0.17.3 h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg=
|
||||
k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
|
||||
k8s.io/apiserver v0.17.3 h1:faZbSuFtJ4dx09vctKZGHms/7bp3qFtbqb10Swswqfs=
|
||||
k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY=
|
||||
k8s.io/cli-runtime v0.17.3 h1:0ZlDdJgJBKsu77trRUynNiWsRuAvAVPBNaQfnt/1qtc=
|
||||
k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA=
|
||||
k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU=
|
||||
k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ=
|
||||
|
|
@ -1064,6 +1083,7 @@ k8s.io/kubelet v0.17.3/go.mod h1:Nh8owUHZcUXtnDAtmGnip36Nw+X6c4rbmDQlVyIhwMQ=
|
|||
k8s.io/kubernetes v1.17.3 h1:zWCppkLfHM+hoLqfbsrQ0cJnYw+4vAvedI92oQnjo/Q=
|
||||
k8s.io/kubernetes v1.17.3/go.mod h1:gt28rfzaskIzJ8d82TSJmGrJ0XZD0BBy8TcQvTuCI3w=
|
||||
k8s.io/legacy-cloud-providers v0.17.3/go.mod h1:ujZML5v8efVQxiXXTG+nck7SjP8KhMRjUYNIsoSkYI0=
|
||||
k8s.io/metrics v0.17.3 h1:IqXkNK+5E3vnobFD923Mn1QJEt3fb6+sK0wIjtBzOvw=
|
||||
k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI=
|
||||
k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
|
||||
k8s.io/sample-apiserver v0.17.3/go.mod h1:cn/rvFIttGNqy1v88B5ZlDAbyyqDOoF7JHSwPiqNCNQ=
|
||||
|
|
@ -1081,6 +1101,7 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIa
|
|||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
|
||||
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
|
||||
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
|
||||
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible h1:qV3eFdgCp7Cp/ORjkJI9VBBEOntT+z385jLqdBtmgHA=
|
||||
sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible/go.mod h1:qhqLyNwJC49PoUalmtzYb4s9fT8HOMBTLbTY1QoVOqI=
|
||||
|
|
@ -1089,4 +1110,5 @@ sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1
|
|||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc h1:MksmcCZQWAQJCTA5T0jgI/0sJ51AVm4Z41MrmfczEoc=
|
||||
vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
|
||||
|
|
|
|||
|
|
@ -53,11 +53,17 @@ sudo systemctl is-active --quiet kubelet \
|
|||
|
||||
# conntrack is required for kubernetes 1.18 and higher for none driver
|
||||
if ! conntrack --version &>/dev/null; then
|
||||
echo "WARNING: No contrack is not installed"
|
||||
echo "WARNING: contrack is not installed. will try to install."
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install conntrack
|
||||
fi
|
||||
|
||||
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
|
||||
if ! which socat &>/dev/null; then
|
||||
echo "WARNING: socat is not installed. will try to install."
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get -qq -y install socat
|
||||
fi
|
||||
|
||||
mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
|
||||
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import (
|
|||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/download"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
)
|
||||
|
|
@ -45,6 +46,7 @@ func init() {
|
|||
if k8sVersion != "" {
|
||||
k8sVersions = append(k8sVersions, k8sVersion)
|
||||
}
|
||||
viper.Set("preload", "true")
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
|
@ -61,8 +63,8 @@ func main() {
|
|||
|
||||
for _, kv := range k8sVersions {
|
||||
for _, cr := range containerRuntimes {
|
||||
tf := download.TarballName(kv)
|
||||
if tarballExists(tf) {
|
||||
tf := download.TarballName(kv, cr)
|
||||
if download.PreloadExists(kv, cr) {
|
||||
fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv)
|
||||
continue
|
||||
}
|
||||
|
|
@ -77,13 +79,6 @@ func main() {
|
|||
}
|
||||
}
|
||||
|
||||
func tarballExists(tarballFilename string) bool {
|
||||
fmt.Println("Checking if tarball already exists...")
|
||||
gcsPath := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename)
|
||||
cmd := exec.Command("gsutil", "stat", gcsPath)
|
||||
return cmd.Run() == nil
|
||||
}
|
||||
|
||||
func verifyDockerStorage() error {
|
||||
cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}")
|
||||
var stderr bytes.Buffer
|
||||
|
|
|
|||
|
|
@ -30,13 +30,22 @@ func uploadTarball(tarballFilename string) error {
|
|||
hostPath := path.Join("out/", tarballFilename)
|
||||
gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket)
|
||||
cmd := exec.Command("gsutil", "cp", hostPath, gcsDest)
|
||||
if output, err := cmd.Output(); err != nil {
|
||||
fmt.Printf("Running: %v\n", cmd.Args)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output))
|
||||
}
|
||||
// Make tarball public to all users
|
||||
gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename)
|
||||
cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath)
|
||||
if output, err := cmd.Output(); err != nil {
|
||||
fmt.Printf("Running: %v\n", cmd.Args)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf(`Failed to update ACLs on this tarball in GCS. Please run
|
||||
|
||||
gsutil acl ch -u AllUsers:R %s
|
||||
|
||||
manually to make this link public, or rerun this script to rebuild and reupload the tarball.
|
||||
|
||||
`, gcsPath)
|
||||
return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output))
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/storageclass"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
||||
// defaultStorageClassProvisioner is the name of the default storage class provisioner
|
||||
|
|
@ -211,13 +212,17 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
|
|||
}
|
||||
|
||||
command := kubectlCommand(cc, deployFiles, enable)
|
||||
glog.Infof("Running: %v", command)
|
||||
rr, err := cmd.RunCmd(command)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "addon apply")
|
||||
|
||||
// Retry, because sometimes we race against an apiserver restart
|
||||
apply := func() error {
|
||||
_, err := cmd.RunCmd(command)
|
||||
if err != nil {
|
||||
glog.Warningf("apply failed, will retry: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
glog.Infof("output:\n%s", rr.Output())
|
||||
return nil
|
||||
|
||||
return retry.Expo(apply, 1*time.Second, time.Second*30)
|
||||
}
|
||||
|
||||
// enableOrDisableStorageClasses enables or disables storage classes
|
||||
|
|
|
|||
|
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package kic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
|
|
@ -121,14 +120,14 @@ func (d *Driver) Create() error {
|
|||
return errors.Wrap(err, "prepare kic ssh")
|
||||
}
|
||||
|
||||
// If preload doesn't exist, don't both extracting tarball to volume
|
||||
// If preload doesn't exist, don't bother extracting tarball to volume
|
||||
if !download.PreloadExists(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime) {
|
||||
return nil
|
||||
}
|
||||
t := time.Now()
|
||||
glog.Infof("Starting extracting preloaded images to volume")
|
||||
// Extract preloaded images to container
|
||||
if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion), params.Name, BaseImage); err != nil {
|
||||
if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil {
|
||||
glog.Infof("Unable to extract preloaded tarball to volume: %v", err)
|
||||
} else {
|
||||
glog.Infof("Took %f seconds to extract preloaded images to volume", time.Since(t).Seconds())
|
||||
|
|
@ -220,20 +219,12 @@ func (d *Driver) GetURL() (string, error) {
|
|||
|
||||
// GetState returns the state that the host is in (running, stopped, etc)
|
||||
func (d *Driver) GetState() (state.State, error) {
|
||||
// allow no more than 2 seconds for this. when this takes long this means deadline passed
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
glog.Errorf("GetState for %s took longer than normal. Restarting your %s daemon might fix this issue.", d.MachineName, d.OCIBinary)
|
||||
return state.Error, fmt.Errorf("inspect %s timeout", d.MachineName)
|
||||
}
|
||||
o := strings.TrimSpace(string(out))
|
||||
out, err := oci.WarnIfSlow(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
|
||||
if err != nil {
|
||||
return state.Error, errors.Wrapf(err, "%s: %s", strings.Join(cmd.Args, " "), o)
|
||||
return state.Error, err
|
||||
}
|
||||
|
||||
o := strings.TrimSpace(string(out))
|
||||
switch o {
|
||||
case "running":
|
||||
return state.Running, nil
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
|
||||
"fmt"
|
||||
|
|
@ -232,19 +233,39 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) {
|
|||
return string(out), err
|
||||
}
|
||||
|
||||
// ContainerExists checks if container name exists (either running or exited)
|
||||
func ContainerExists(ociBin string, name string) (bool, error) {
|
||||
// allow no more than 3 seconds for this.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// WarnIfSlow runs an oci command, warning about performance issues
|
||||
func WarnIfSlow(arg ...string) ([]byte, error) {
|
||||
killTime := 15 * time.Second
|
||||
warnTime := 2 * time.Second
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), killTime)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, ociBin, "ps", "-a", "--format", "{{.Names}}")
|
||||
out, err := cmd.CombinedOutput()
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return false, fmt.Errorf("time out running %s ps -a", ociBin)
|
||||
start := time.Now()
|
||||
glog.Infof("executing with %s timeout: %v", arg, killTime)
|
||||
cmd := exec.CommandContext(ctx, arg[0], arg[1:]...)
|
||||
stdout, err := cmd.Output()
|
||||
d := time.Since(start)
|
||||
if d > warnTime {
|
||||
out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d})
|
||||
out.T(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": arg[0]})
|
||||
}
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return stdout, fmt.Errorf("%q timed out after %s", strings.Join(cmd.Args, " "), killTime)
|
||||
}
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return stdout, fmt.Errorf("%q failed: %v: %s", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr)
|
||||
}
|
||||
return stdout, fmt.Errorf("%q failed: %v", strings.Join(cmd.Args, " "), err)
|
||||
}
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
// ContainerExists checks if container name exists (either running or exited)
|
||||
func ContainerExists(ociBin string, name string) (bool, error) {
|
||||
out, err := WarnIfSlow(ociBin, "ps", "-a", "--format", "{{.Names}}")
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, string(out))
|
||||
}
|
||||
|
|
@ -410,12 +431,10 @@ func withPortMappings(portMappings []PortMapping) createOpt {
|
|||
|
||||
// listContainersByLabel returns all the container names with a specified label
|
||||
func listContainersByLabel(ociBinary string, label string) ([]string, error) {
|
||||
|
||||
// allow no more than 5 seconds for docker ps
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
cmd := exec.CommandContext(ctx, ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
|
||||
stdout, err := cmd.Output()
|
||||
stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := bufio.NewScanner(bytes.NewReader(stdout))
|
||||
var names []string
|
||||
for s.Scan() {
|
||||
|
|
@ -448,21 +467,6 @@ func PointToHostDockerDaemon() error {
|
|||
|
||||
// ContainerStatus returns status of a container running,exited,...
|
||||
func ContainerStatus(ociBin string, name string) (string, error) {
|
||||
// allow no more than 2 seconds for this. when this takes long this means deadline passed
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, ociBin, "inspect", name, "--format={{.State.Status}}")
|
||||
out, err := cmd.CombinedOutput()
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
glog.Warningf("%s inspect %s took longer than normal. Restarting your %s daemon might fix this issue.", ociBin, name, ociBin)
|
||||
return strings.TrimSpace(string(out)), fmt.Errorf("inspect %s timeout", name)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return string(out), errors.Wrapf(err, "inspecting container: output %s", out)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}")
|
||||
return strings.TrimSpace(string(out)), err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,11 +19,9 @@ package oci
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
|
|
@ -42,16 +40,8 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error {
|
|||
}
|
||||
|
||||
for _, v := range vs {
|
||||
// allow no more than 3 seconds for this. when this takes long this means deadline passed
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
cmd := exec.CommandContext(ctx, ociBin, "volume", "rm", "--force", v)
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
glog.Warningf("removing volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin)
|
||||
deleteErrs = append(deleteErrs, fmt.Errorf("delete deadline exceeded for %s", label))
|
||||
}
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
deleteErrs = append(deleteErrs, fmt.Errorf("deleting volume %s: output: %s", v, string(out)))
|
||||
if _, err := WarnIfSlow(ociBin, "volume", "rm", "--force", v); err != nil {
|
||||
deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -65,19 +55,8 @@ func PruneAllVolumesByLabel(ociBin string, label string) []error {
|
|||
var deleteErrs []error
|
||||
glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
|
||||
|
||||
// allow no more than 3 seconds for this. when this takes long this means deadline passed
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// try to prune afterwards just in case delete didn't go through
|
||||
cmd := exec.CommandContext(ctx, ociBin, "volume", "prune", "-f", "--filter", "label="+label)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s: %s", label, string(out)))
|
||||
}
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
glog.Warningf("pruning volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin)
|
||||
deleteErrs = append(deleteErrs, fmt.Errorf("prune deadline exceeded for %s", label))
|
||||
if _, err := WarnIfSlow(ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil {
|
||||
deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label))
|
||||
}
|
||||
|
||||
return deleteErrs
|
||||
|
|
|
|||
|
|
@ -61,8 +61,8 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
|
|||
if _, ok := extraOpts["node-ip"]; !ok {
|
||||
extraOpts["node-ip"] = cp.IP
|
||||
}
|
||||
nodeName := KubeNodeName(mc, nc)
|
||||
if nodeName != "" {
|
||||
if _, ok := extraOpts["hostname-override"]; !ok {
|
||||
nodeName := KubeNodeName(mc, nc)
|
||||
extraOpts["hostname-override"] = nodeName
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ Wants=crio.service
|
|||
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
|
||||
[Install]
|
||||
`,
|
||||
|
|
@ -107,7 +107,7 @@ Wants=containerd.service
|
|||
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
|
||||
[Install]
|
||||
`,
|
||||
|
|
@ -140,7 +140,7 @@ Wants=containerd.service
|
|||
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
|
||||
|
||||
[Install]
|
||||
`,
|
||||
|
|
@ -167,7 +167,7 @@ Wants=docker.socket
|
|||
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests
|
||||
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests
|
||||
|
||||
[Install]
|
||||
`,
|
||||
|
|
|
|||
|
|
@ -30,9 +30,11 @@ import (
|
|||
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
core "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper"
|
||||
|
|
@ -43,7 +45,7 @@ import (
|
|||
)
|
||||
|
||||
// minLogCheckTime how long to wait before spamming error logs to console
|
||||
const minLogCheckTime = 30 * time.Second
|
||||
const minLogCheckTime = 60 * time.Second
|
||||
|
||||
// WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't
|
||||
func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
|
||||
|
|
@ -61,6 +63,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
|
|||
if _, ierr := apiServerPID(cr); ierr != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -180,7 +183,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con
|
|||
}
|
||||
|
||||
// WaitForHealthyAPIServer waits for api server status to be running
|
||||
func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
|
||||
func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error {
|
||||
glog.Infof("waiting for apiserver healthz status ...")
|
||||
hStart := time.Now()
|
||||
|
||||
|
|
@ -208,7 +211,35 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
|
|||
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil {
|
||||
return fmt.Errorf("apiserver healthz never reported healthy")
|
||||
}
|
||||
glog.Infof("duration metric: took %s to wait for apiserver healthz status ...", time.Since(hStart))
|
||||
|
||||
vcheck := func() (bool, error) {
|
||||
if time.Since(start) > timeout {
|
||||
return false, fmt.Errorf("cluster wait timed out during version check")
|
||||
}
|
||||
if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil {
|
||||
glog.Warningf("api server version match failed: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil {
|
||||
return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion)
|
||||
}
|
||||
|
||||
glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
|
||||
return nil
|
||||
}
|
||||
|
||||
func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error {
|
||||
vi, err := client.ServerVersion()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "server version")
|
||||
}
|
||||
glog.Infof("control plane version: %s", vi)
|
||||
if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 {
|
||||
return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -51,10 +51,12 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/kubelet"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
"k8s.io/minikube/pkg/util"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
|
|
@ -129,7 +131,7 @@ func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogO
|
|||
dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines))
|
||||
}
|
||||
|
||||
describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s",
|
||||
describeNodes := fmt.Sprintf("sudo %s describe nodes --kubeconfig=%s",
|
||||
path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"),
|
||||
path.Join(vmpath.GuestPersistentDir, "kubeconfig"))
|
||||
|
||||
|
|
@ -181,20 +183,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// StartCluster starts the cluster
|
||||
func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
|
||||
err := bsutil.ExistingConfig(k.c)
|
||||
if err == nil { // if there is an existing cluster don't reconfigure it
|
||||
return k.restartCluster(cfg)
|
||||
}
|
||||
glog.Infof("existence check: %v", err)
|
||||
|
||||
start := time.Now()
|
||||
glog.Infof("StartCluster: %+v", cfg)
|
||||
defer func() {
|
||||
glog.Infof("StartCluster complete in %s", time.Since(start))
|
||||
}()
|
||||
|
||||
func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
|
||||
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "parsing kubernetes version")
|
||||
|
|
@ -237,10 +226,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
|
|||
}
|
||||
|
||||
conf := bsutil.KubeadmYamlPath
|
||||
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
|
||||
rr, err := k.c.RunCmd(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "init failed. output: %q", rr.Output())
|
||||
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s",
|
||||
bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
|
||||
if _, err := k.c.RunCmd(c); err != nil {
|
||||
return errors.Wrap(err, "run")
|
||||
}
|
||||
|
||||
if cfg.Driver == driver.Docker {
|
||||
|
|
@ -258,12 +247,75 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
|
|||
}
|
||||
|
||||
if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
|
||||
glog.Warningf("unable to create cluster role binding, some addons might not work : %v. ", err)
|
||||
glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unpause unpauses any Kubernetes backplane components
|
||||
func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error {
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "list paused")
|
||||
}
|
||||
|
||||
if len(ids) > 0 {
|
||||
if err := cr.UnpauseContainers(ids); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartCluster starts the cluster
|
||||
func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
|
||||
start := time.Now()
|
||||
glog.Infof("StartCluster: %+v", cfg)
|
||||
defer func() {
|
||||
glog.Infof("StartCluster complete in %s", time.Since(start))
|
||||
}()
|
||||
|
||||
// Before we start, ensure that no paused components are lurking around
|
||||
if err := k.unpause(cfg); err != nil {
|
||||
glog.Warningf("unpause failed: %v", err)
|
||||
}
|
||||
|
||||
if err := bsutil.ExistingConfig(k.c); err == nil {
|
||||
glog.Infof("found existing configuration files, will attempt cluster restart")
|
||||
rerr := k.restartCluster(cfg)
|
||||
if rerr == nil {
|
||||
return nil
|
||||
}
|
||||
out.T(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
|
||||
if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
|
||||
glog.Warningf("delete failed: %v", err)
|
||||
}
|
||||
// Fall-through to init
|
||||
}
|
||||
|
||||
conf := bsutil.KubeadmYamlPath
|
||||
if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil {
|
||||
return errors.Wrap(err, "cp")
|
||||
}
|
||||
|
||||
err := k.init(cfg)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
out.T(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err})
|
||||
if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
|
||||
glog.Warningf("delete failed: %v", err)
|
||||
}
|
||||
return k.init(cfg)
|
||||
}
|
||||
|
||||
func (k *Bootstrapper) controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) {
|
||||
cp, err := config.PrimaryControlPlane(&cfg)
|
||||
if err != nil {
|
||||
|
|
@ -324,23 +376,23 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
|
|||
return err
|
||||
}
|
||||
|
||||
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := k.client(ip, port)
|
||||
client, err := k.client(ip, port)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get k8s client")
|
||||
}
|
||||
|
||||
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil {
|
||||
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, ip, port, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil {
|
||||
return errors.Wrap(err, "waiting for system pods")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// needsReset returns whether or not the cluster needs to be reconfigured
|
||||
func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool {
|
||||
func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset, version string) bool {
|
||||
if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
|
||||
glog.Infof("needs reset: configs differ:\n%s", rr.Output())
|
||||
return true
|
||||
|
|
@ -361,6 +413,12 @@ func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kube
|
|||
glog.Infof("needs reset: %v", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if err := kverify.APIServerVersionMatch(client, version); err != nil {
|
||||
glog.Infof("needs reset: %v", err)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
@ -401,7 +459,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
|
|||
|
||||
// If the cluster is running, check if we have any work to do.
|
||||
conf := bsutil.KubeadmYamlPath
|
||||
if !k.needsReset(conf, ip, port, client) {
|
||||
if !k.needsReset(conf, ip, port, client, cfg.KubernetesConfig.KubernetesVersion) {
|
||||
glog.Infof("Taking a shortcut, as the cluster seems to be properly configured")
|
||||
return nil
|
||||
}
|
||||
|
|
@ -410,8 +468,8 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
|
|||
return errors.Wrap(err, "clearing stale configs")
|
||||
}
|
||||
|
||||
if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil {
|
||||
return errors.Wrap(err, "mv")
|
||||
if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil {
|
||||
return errors.Wrap(err, "cp")
|
||||
}
|
||||
|
||||
baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase)
|
||||
|
|
@ -425,9 +483,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
|
|||
glog.Infof("resetting cluster from %s", conf)
|
||||
// Run commands one at a time so that it is easier to root cause failures.
|
||||
for _, c := range cmds {
|
||||
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
|
||||
_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "running cmd: %s", rr.Command())
|
||||
return errors.Wrap(err, "run")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -441,12 +499,22 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
|
|||
return errors.Wrap(err, "apiserver healthz")
|
||||
}
|
||||
|
||||
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), ip, port, kconst.DefaultControlPlaneTimeout); err != nil {
|
||||
return errors.Wrap(err, "apiserver health")
|
||||
}
|
||||
|
||||
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
|
||||
return errors.Wrap(err, "system pods")
|
||||
}
|
||||
|
||||
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))); err != nil {
|
||||
return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command()))
|
||||
// This can fail during upgrades if the old pods have not shut down yet
|
||||
addonPhase := func() error {
|
||||
_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf)))
|
||||
return err
|
||||
}
|
||||
if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil {
|
||||
glog.Warningf("addon install failed, wil retry: %v", err)
|
||||
return errors.Wrap(err, "addons")
|
||||
}
|
||||
|
||||
if err := bsutil.AdjustResourceLimits(k.c); err != nil {
|
||||
|
|
@ -504,11 +572,32 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
|
|||
cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
|
||||
}
|
||||
|
||||
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil {
|
||||
return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command())
|
||||
rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
|
||||
if derr != nil {
|
||||
glog.Warningf("%s: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
if err := kubelet.ForceStop(k.c); err != nil {
|
||||
glog.Warningf("stop kubelet: %v", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "runtime")
|
||||
}
|
||||
|
||||
containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
|
||||
if err != nil {
|
||||
glog.Warningf("unable to list kube-system containers: %v", err)
|
||||
}
|
||||
if len(containers) > 0 {
|
||||
glog.Warningf("found %d kube-system containers to stop", len(containers))
|
||||
if err := cr.StopContainers(containers); err != nil {
|
||||
glog.Warningf("error stopping containers: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return derr
|
||||
}
|
||||
|
||||
// SetupCerts sets up certificates within the cluster.
|
||||
|
|
@ -531,7 +620,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
|
|||
}
|
||||
|
||||
if err := r.Preload(cfg.KubernetesConfig); err != nil {
|
||||
return errors.Wrap(err, "preloading")
|
||||
glog.Infof("prelaoding failed, will try to load cached images: %v", err)
|
||||
}
|
||||
|
||||
if cfg.KubernetesConfig.ShouldLoadCachedImages {
|
||||
|
|
@ -619,7 +708,7 @@ func reloadKubelet(runner command.Runner) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf))
|
||||
startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cp %s.new %s && sudo cp %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf))
|
||||
if _, err := runner.RunCmd(startCmd); err != nil {
|
||||
return errors.Wrap(err, "starting kubelet")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package constants
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
|
@ -26,9 +27,9 @@ import (
|
|||
|
||||
const (
|
||||
// DefaultKubernetesVersion is the default kubernetes version
|
||||
DefaultKubernetesVersion = "v1.18.0-rc.1"
|
||||
DefaultKubernetesVersion = "v1.18.0"
|
||||
// NewestKubernetesVersion is the newest Kubernetes version to test against
|
||||
NewestKubernetesVersion = "v1.18.0-rc.1"
|
||||
NewestKubernetesVersion = "v1.18.0"
|
||||
// OldestKubernetesVersion is the oldest Kubernetes version to test against
|
||||
OldestKubernetesVersion = "v1.11.10"
|
||||
// DefaultClusterName is the default nane for the k8s cluster
|
||||
|
|
@ -100,4 +101,7 @@ var (
|
|||
"storage-gluster",
|
||||
"istio-operator",
|
||||
}
|
||||
|
||||
// ErrMachineMissing is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C)
|
||||
ErrMachineMissing = errors.New("machine does not exist")
|
||||
)
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/download"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
|
@ -313,5 +314,8 @@ func (r *Containerd) SystemLogCmd(len int) string {
|
|||
|
||||
// Preload preloads the container runtime with k8s images
|
||||
func (r *Containerd) Preload(cfg config.KubernetesConfig) error {
|
||||
if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("not yet implemented for %s", r.Name())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/download"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
|
@ -230,5 +231,8 @@ func (r *CRIO) SystemLogCmd(len int) string {
|
|||
|
||||
// Preload preloads the container runtime with k8s images
|
||||
func (r *CRIO) Preload(cfg config.KubernetesConfig) error {
|
||||
if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("not yet implemented for %s", r.Name())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -294,6 +294,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error {
|
|||
return nil
|
||||
}
|
||||
k8sVersion := cfg.KubernetesVersion
|
||||
cRuntime := cfg.ContainerRuntime
|
||||
|
||||
// If images already exist, return
|
||||
images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion)
|
||||
|
|
@ -310,7 +311,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error {
|
|||
glog.Infof("error saving reference store: %v", err)
|
||||
}
|
||||
|
||||
tarballPath := download.TarballPath(k8sVersion)
|
||||
tarballPath := download.TarballPath(k8sVersion, cRuntime)
|
||||
targetDir := "/"
|
||||
targetName := "preloaded.tar.lz4"
|
||||
dest := path.Join(targetDir, targetName)
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"google.golang.org/api/option"
|
||||
|
|
@ -46,13 +47,13 @@ const (
|
|||
)
|
||||
|
||||
// TarballName returns name of the tarball
|
||||
func TarballName(k8sVersion string) string {
|
||||
return fmt.Sprintf("preloaded-images-k8s-%s-%s-docker-overlay2-amd64.tar.lz4", PreloadVersion, k8sVersion)
|
||||
func TarballName(k8sVersion, containerRuntime string) string {
|
||||
return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, containerRuntime, runtime.GOARCH)
|
||||
}
|
||||
|
||||
// returns the name of the checksum file
|
||||
func checksumName(k8sVersion string) string {
|
||||
return fmt.Sprintf("%s.checksum", TarballName(k8sVersion))
|
||||
func checksumName(k8sVersion, containerRuntime string) string {
|
||||
return fmt.Sprintf("%s.checksum", TarballName(k8sVersion, containerRuntime))
|
||||
}
|
||||
|
||||
// returns target dir for all cached items related to preloading
|
||||
|
|
@ -61,22 +62,23 @@ func targetDir() string {
|
|||
}
|
||||
|
||||
// PreloadChecksumPath returns the local path to the cached checksum file
|
||||
func PreloadChecksumPath(k8sVersion string) string {
|
||||
return filepath.Join(targetDir(), checksumName(k8sVersion))
|
||||
func PreloadChecksumPath(k8sVersion, containerRuntime string) string {
|
||||
return filepath.Join(targetDir(), checksumName(k8sVersion, containerRuntime))
|
||||
}
|
||||
|
||||
// TarballPath returns the local path to the cached preload tarball
|
||||
func TarballPath(k8sVersion string) string {
|
||||
return filepath.Join(targetDir(), TarballName(k8sVersion))
|
||||
func TarballPath(k8sVersion, containerRuntime string) string {
|
||||
return filepath.Join(targetDir(), TarballName(k8sVersion, containerRuntime))
|
||||
}
|
||||
|
||||
// remoteTarballURL returns the URL for the remote tarball in GCS
|
||||
func remoteTarballURL(k8sVersion string) string {
|
||||
return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion))
|
||||
func remoteTarballURL(k8sVersion, containerRuntime string) string {
|
||||
return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion, containerRuntime))
|
||||
}
|
||||
|
||||
// PreloadExists returns true if there is a preloaded tarball that can be used
|
||||
func PreloadExists(k8sVersion, containerRuntime string) bool {
|
||||
glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime)
|
||||
if !viper.GetBool("preload") {
|
||||
return false
|
||||
}
|
||||
|
|
@ -85,17 +87,18 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
|
|||
// and https://github.com/kubernetes/minikube/issues/6934
|
||||
// to track status of adding containerd & crio
|
||||
if containerRuntime != "docker" {
|
||||
glog.Info("Container runtime isn't docker, skipping preload")
|
||||
return false
|
||||
}
|
||||
|
||||
// Omit remote check if tarball exists locally
|
||||
targetPath := TarballPath(k8sVersion)
|
||||
targetPath := TarballPath(k8sVersion, containerRuntime)
|
||||
if _, err := os.Stat(targetPath); err == nil {
|
||||
glog.Infof("Found local preload: %s", targetPath)
|
||||
return true
|
||||
}
|
||||
|
||||
url := remoteTarballURL(k8sVersion)
|
||||
url := remoteTarballURL(k8sVersion, containerRuntime)
|
||||
resp, err := http.Head(url)
|
||||
if err != nil {
|
||||
glog.Warningf("%s fetch error: %v", url, err)
|
||||
|
|
@ -114,10 +117,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
|
|||
|
||||
// Preload caches the preloaded images tarball on the host machine
|
||||
func Preload(k8sVersion, containerRuntime string) error {
|
||||
if containerRuntime != "docker" {
|
||||
return nil
|
||||
}
|
||||
targetPath := TarballPath(k8sVersion)
|
||||
targetPath := TarballPath(k8sVersion, containerRuntime)
|
||||
|
||||
if _, err := os.Stat(targetPath); err == nil {
|
||||
glog.Infof("Found %s in cache, skipping download", targetPath)
|
||||
|
|
@ -131,7 +131,7 @@ func Preload(k8sVersion, containerRuntime string) error {
|
|||
}
|
||||
|
||||
out.T(out.FileDownload, "Downloading Kubernetes {{.version}} preload ...", out.V{"version": k8sVersion})
|
||||
url := remoteTarballURL(k8sVersion)
|
||||
url := remoteTarballURL(k8sVersion, containerRuntime)
|
||||
|
||||
tmpDst := targetPath + ".download"
|
||||
client := &getter.Client{
|
||||
|
|
@ -146,34 +146,34 @@ func Preload(k8sVersion, containerRuntime string) error {
|
|||
return errors.Wrapf(err, "download failed: %s", url)
|
||||
}
|
||||
|
||||
if err := saveChecksumFile(k8sVersion); err != nil {
|
||||
if err := saveChecksumFile(k8sVersion, containerRuntime); err != nil {
|
||||
return errors.Wrap(err, "saving checksum file")
|
||||
}
|
||||
|
||||
if err := verifyChecksum(k8sVersion, tmpDst); err != nil {
|
||||
if err := verifyChecksum(k8sVersion, containerRuntime, tmpDst); err != nil {
|
||||
return errors.Wrap(err, "verify")
|
||||
}
|
||||
return os.Rename(tmpDst, targetPath)
|
||||
}
|
||||
|
||||
func saveChecksumFile(k8sVersion string) error {
|
||||
glog.Infof("saving checksum for %s ...", TarballName(k8sVersion))
|
||||
func saveChecksumFile(k8sVersion, containerRuntime string) error {
|
||||
glog.Infof("saving checksum for %s ...", TarballName(k8sVersion, containerRuntime))
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting storage client")
|
||||
}
|
||||
attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion)).Attrs(ctx)
|
||||
attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion, containerRuntime)).Attrs(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting storage object")
|
||||
}
|
||||
checksum := attrs.MD5
|
||||
return ioutil.WriteFile(PreloadChecksumPath(k8sVersion), checksum, 0644)
|
||||
return ioutil.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0644)
|
||||
}
|
||||
|
||||
// verifyChecksum returns true if the checksum of the local binary matches
|
||||
// the checksum of the remote binary
|
||||
func verifyChecksum(k8sVersion string, path string) error {
|
||||
func verifyChecksum(k8sVersion, containerRuntime, path string) error {
|
||||
glog.Infof("verifying checksumm of %s ...", path)
|
||||
// get md5 checksum of tarball path
|
||||
contents, err := ioutil.ReadFile(path)
|
||||
|
|
@ -182,7 +182,7 @@ func verifyChecksum(k8sVersion string, path string) error {
|
|||
}
|
||||
checksum := md5.Sum(contents)
|
||||
|
||||
remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion))
|
||||
remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion, containerRuntime))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading checksum file")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -164,8 +164,8 @@ func FlagDefaults(name string) FlagHints {
|
|||
}
|
||||
|
||||
// Choices returns a list of drivers which are possible on this system
|
||||
func Choices() []registry.DriverState {
|
||||
options := registry.Available()
|
||||
func Choices(vm bool) []registry.DriverState {
|
||||
options := registry.Available(vm)
|
||||
|
||||
// Descending priority for predictability and appearance
|
||||
sort.Slice(options, func(i, j int) bool {
|
||||
|
|
@ -234,13 +234,5 @@ func MachineName(cc config.ClusterConfig, n config.Node) string {
|
|||
if len(cc.Nodes) == 1 || n.ControlPlane {
|
||||
return cc.Name
|
||||
}
|
||||
return fmt.Sprintf("%s---%s", cc.Name, n.Name)
|
||||
}
|
||||
|
||||
// ClusterNameFromMachine retrieves the cluster name embedded in the machine name
|
||||
func ClusterNameFromMachine(name string) (string, string) {
|
||||
if strings.Contains(name, "---") {
|
||||
return strings.Split(name, "---")[0], strings.Split(name, "---")[1]
|
||||
}
|
||||
return name, name
|
||||
return fmt.Sprintf("%s-%s", cc.Name, n.Name)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ func TestSuggest(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
got := Choices()
|
||||
got := Choices(false)
|
||||
gotNames := []string{}
|
||||
for _, c := range got {
|
||||
gotNames = append(gotNames, c.Name)
|
||||
|
|
|
|||
|
|
@ -29,6 +29,10 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
// initflag must be imported before any other minikube pkg.
|
||||
// Fix for https://github.com/kubernetes/minikube/issues/4866
|
||||
_ "k8s.io/minikube/pkg/initflag"
|
||||
|
||||
"github.com/golang-collections/collections/stack"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/util/lock"
|
||||
|
|
@ -45,6 +49,7 @@ var blacklist = []string{
|
|||
"env {{.docker_env}}",
|
||||
"\\n",
|
||||
"==\u003e {{.name}} \u003c==",
|
||||
"- {{.profile}}",
|
||||
}
|
||||
|
||||
// ErrMapFile is a constant to refer to the err_map file, which contains the Advice strings.
|
||||
|
|
@ -450,14 +455,17 @@ func writeStringsToFiles(e *state, output string) error {
|
|||
return nil
|
||||
}
|
||||
fmt.Printf("Writing to %s\n", filepath.Base(path))
|
||||
var currentTranslations map[string]interface{}
|
||||
currentTranslations := make(map[string]interface{})
|
||||
f, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reading translation file")
|
||||
}
|
||||
err = json.Unmarshal(f, ¤tTranslations)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unmarshalling current translations")
|
||||
// Unmarhsal nonempty files
|
||||
if len(f) > 0 {
|
||||
err = json.Unmarshal(f, ¤tTranslations)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unmarshalling current translations")
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure to not overwrite already translated strings
|
||||
|
|
|
|||
|
|
@ -17,11 +17,13 @@ limitations under the License.
|
|||
package machine
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
// Driver used by testdata
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
_ "k8s.io/minikube/pkg/minikube/registry/drvs/virtualbox"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
|
|
@ -41,6 +43,11 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e
|
|||
}
|
||||
|
||||
func RegisterMockDriver(t *testing.T) {
|
||||
// Debugging this test is a nightmare.
|
||||
if err := flag.Lookup("logtostderr").Value.Set("true"); err != nil {
|
||||
t.Logf("unable to set logtostderr: %v", err)
|
||||
}
|
||||
|
||||
t.Helper()
|
||||
if !registry.Driver(driver.Mock).Empty() {
|
||||
return
|
||||
|
|
@ -163,7 +170,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
|
|||
// This should pass with creating host, while machine does not exist.
|
||||
h, _, err = StartHost(api, mc, n)
|
||||
if err != nil {
|
||||
if err != ErrorMachineNotExist {
|
||||
if err != constants.ErrMachineMissing {
|
||||
t.Fatalf("Error starting host: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,11 +22,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine"
|
||||
"github.com/docker/machine/libmachine/host"
|
||||
"github.com/docker/machine/libmachine/mcnerror"
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
|
@ -85,11 +87,16 @@ func DeleteHost(api libmachine.API, machineName string) error {
|
|||
}
|
||||
|
||||
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName})
|
||||
if err := host.Driver.Remove(); err != nil {
|
||||
glog.Warningf("remove failed, will retry: %v", err)
|
||||
time.Sleep(2 * time.Second)
|
||||
return delete(api, host, machineName)
|
||||
}
|
||||
|
||||
nerr := host.Driver.Remove()
|
||||
// delete removes a host and it's local data files
|
||||
func delete(api libmachine.API, h *host.Host, machineName string) error {
|
||||
if err := h.Driver.Remove(); err != nil {
|
||||
glog.Warningf("remove failed, will retry: %v", err)
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
nerr := h.Driver.Remove()
|
||||
if nerr != nil {
|
||||
return errors.Wrap(nerr, "host remove retry")
|
||||
}
|
||||
|
|
@ -100,3 +107,24 @@ func DeleteHost(api libmachine.API, machineName string) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// demolish destroys a host by any means necessary - use only if state is inconsistent
|
||||
func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) {
|
||||
machineName := driver.MachineName(cc, n)
|
||||
glog.Infof("DEMOLISHING %s ...", machineName)
|
||||
|
||||
// This will probably fail
|
||||
err := stop(h)
|
||||
if err != nil {
|
||||
glog.Infof("stophost failed (probably ok): %v", err)
|
||||
}
|
||||
|
||||
// For 95% of cases, this should be enough
|
||||
err = DeleteHost(api, machineName)
|
||||
if err != nil {
|
||||
glog.Warningf("deletehost failed: %v", err)
|
||||
}
|
||||
|
||||
err = delete(api, h, machineName)
|
||||
glog.Warningf("delete failed (probably ok) %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -47,15 +47,8 @@ const (
|
|||
maxClockDesyncSeconds = 2.1
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorMachineNotExist is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C)
|
||||
ErrorMachineNotExist = errors.New("machine does not exist")
|
||||
)
|
||||
|
||||
// fixHost fixes up a previously configured VM so that it is ready to run Kubernetes
|
||||
func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
|
||||
out.T(out.Waiting, "Reconfiguring existing host ...")
|
||||
|
||||
start := time.Now()
|
||||
glog.Infof("fixHost starting: %s", n.Name)
|
||||
defer func() {
|
||||
|
|
@ -67,21 +60,24 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
|
|||
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
|
||||
}
|
||||
|
||||
driverName := h.Driver.DriverName()
|
||||
|
||||
// check if need to re-run docker-env
|
||||
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
|
||||
maybeWarnAboutEvalEnv(driverName, cc.Name)
|
||||
|
||||
h, err = recreateIfNeeded(api, cc, n, h)
|
||||
if err != nil {
|
||||
return h, err
|
||||
}
|
||||
|
||||
// Technically, we should only have to call provision if Docker has changed,
|
||||
// but who can predict what shape the existing VM is in.
|
||||
e := engineOptions(cc)
|
||||
h.HostOptions.EngineOptions.Env = e.Env
|
||||
err = provisionDockerMachine(h)
|
||||
if err != nil {
|
||||
return h, errors.Wrap(err, "provision")
|
||||
// Avoid reprovisioning "none" driver because provision.Detect requires SSH
|
||||
if !driver.BareMetal(h.Driver.DriverName()) {
|
||||
e := engineOptions(cc)
|
||||
h.HostOptions.EngineOptions.Env = e.Env
|
||||
err = provisionDockerMachine(h)
|
||||
if err != nil {
|
||||
return h, errors.Wrap(err, "provision")
|
||||
}
|
||||
}
|
||||
|
||||
if driver.IsMock(h.DriverName) {
|
||||
|
|
@ -93,59 +89,63 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
|
|||
}
|
||||
|
||||
if driver.BareMetal(h.Driver.DriverName()) {
|
||||
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
|
||||
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName)
|
||||
return h, nil
|
||||
}
|
||||
|
||||
return h, ensureSyncedGuestClock(h, cc.Driver)
|
||||
return h, ensureSyncedGuestClock(h, driverName)
|
||||
}
|
||||
|
||||
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
|
||||
s, err := h.Driver.GetState()
|
||||
if err != nil || s == state.Stopped || s == state.None {
|
||||
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
|
||||
me, err := machineExists(h.Driver.DriverName(), s, err)
|
||||
if !me {
|
||||
// If the error is that virtual machine does not exist error, handle error(recreate virtual machine)
|
||||
if err == ErrorMachineNotExist {
|
||||
// remove virtual machine
|
||||
if err := h.Driver.Remove(); err != nil {
|
||||
// skip returning error since it may be before docker image pulling(so, no host exist)
|
||||
if h.Driver.DriverName() != driver.Docker {
|
||||
return nil, errors.Wrap(err, "host remove")
|
||||
}
|
||||
}
|
||||
// remove machine config directory
|
||||
if err := api.Remove(cc.Name); err != nil {
|
||||
return nil, errors.Wrap(err, "api remove")
|
||||
}
|
||||
// recreate virtual machine
|
||||
out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": cc.Name})
|
||||
h, err = createHost(api, cc, n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error recreating VM")
|
||||
}
|
||||
// return ErrMachineNotExist err to initialize preExists flag
|
||||
return h, ErrorMachineNotExist
|
||||
}
|
||||
// If the error is not that virtual machine does not exist error, return error
|
||||
return nil, errors.Wrap(err, "Error getting state for host")
|
||||
}
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(cc, n)
|
||||
machineType := driver.MachineType(cc.Driver)
|
||||
if s == state.Running {
|
||||
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType})
|
||||
} else {
|
||||
out.T(out.Restarting, `Starting existing {{.driver_name}} {{.machine_type}} for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType})
|
||||
if err := h.Driver.Start(); err != nil {
|
||||
return h, errors.Wrap(err, "driver start")
|
||||
}
|
||||
if err := api.Save(h); err != nil {
|
||||
return h, errors.Wrap(err, "save")
|
||||
recreated := false
|
||||
s, serr := h.Driver.GetState()
|
||||
|
||||
glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, serr)
|
||||
if serr != nil || s == state.Stopped || s == state.None {
|
||||
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
|
||||
me, err := machineExists(h.Driver.DriverName(), s, serr)
|
||||
glog.Infof("exists: %v err=%v", me, err)
|
||||
glog.Infof("%q vs %q", err, constants.ErrMachineMissing)
|
||||
|
||||
if !me || err == constants.ErrMachineMissing {
|
||||
out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
|
||||
demolish(api, cc, n, h)
|
||||
|
||||
glog.Infof("Sleeping 1 second for extra luck!")
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
h, err = createHost(api, cc, n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "recreate")
|
||||
}
|
||||
|
||||
recreated = true
|
||||
s, serr = h.Driver.GetState()
|
||||
}
|
||||
}
|
||||
|
||||
if serr != constants.ErrMachineMissing {
|
||||
glog.Warningf("unexpected machine state, will restart: %v", serr)
|
||||
}
|
||||
|
||||
if s == state.Running {
|
||||
if !recreated {
|
||||
out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
if !recreated {
|
||||
out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
|
||||
}
|
||||
if err := h.Driver.Start(); err != nil {
|
||||
return h, errors.Wrap(err, "driver start")
|
||||
}
|
||||
if err := api.Save(h); err != nil {
|
||||
return h, errors.Wrap(err, "save")
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
|
|
@ -222,7 +222,7 @@ func adjustGuestClock(h hostRunner, t time.Time) error {
|
|||
|
||||
func machineExistsState(s state.State, err error) (bool, error) {
|
||||
if s == state.None {
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
|
@ -231,7 +231,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) {
|
|||
_ = s // not used
|
||||
if err == drverr {
|
||||
// if the error matches driver error
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
|
@ -239,7 +239,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) {
|
|||
func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
|
||||
if s == state.None || (err != nil && err.Error() == msg) {
|
||||
// if the error contains the message
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
|
@ -247,10 +247,10 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
|
|||
func machineExistsDocker(s state.State, err error) (bool, error) {
|
||||
if s == state.Error {
|
||||
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
} else if s == state.None {
|
||||
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
|
@ -282,7 +282,7 @@ func machineExists(d string, s state.State, err error) (bool, error) {
|
|||
return machineExistsDocker(s, err)
|
||||
case driver.Mock:
|
||||
if s == state.Error {
|
||||
return false, ErrorMachineNotExist
|
||||
return false, constants.ErrMachineMissing
|
||||
}
|
||||
return true, err
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -18,13 +18,14 @@ package machine
|
|||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/docker/machine/libmachine/provision"
|
||||
"github.com/golang/glog"
|
||||
"github.com/shirou/gopsutil/cpu"
|
||||
"github.com/shirou/gopsutil/disk"
|
||||
"github.com/shirou/gopsutil/mem"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
|
@ -80,18 +81,17 @@ func showLocalOsRelease() {
|
|||
}
|
||||
|
||||
// logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM
|
||||
func logRemoteOsRelease(drv drivers.Driver) {
|
||||
provisioner, err := provision.DetectProvisioner(drv)
|
||||
func logRemoteOsRelease(r command.Runner) {
|
||||
rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release"))
|
||||
if err != nil {
|
||||
glog.Errorf("DetectProvisioner: %v", err)
|
||||
glog.Infof("remote release failed: %v", err)
|
||||
}
|
||||
|
||||
osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes())
|
||||
if err != nil {
|
||||
glog.Errorf("NewOsRelease: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
osReleaseInfo, err := provisioner.GetOsReleaseInfo()
|
||||
if err != nil {
|
||||
glog.Errorf("GetOsReleaseInfo: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName)
|
||||
glog.Infof("Remote host: %s", osReleaseInfo.PrettyName)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -212,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
|
|||
showLocalOsRelease()
|
||||
}
|
||||
if driver.IsVM(mc.Driver) {
|
||||
logRemoteOsRelease(h.Driver)
|
||||
logRemoteOsRelease(r)
|
||||
}
|
||||
return syncLocalAssets(r)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,8 @@ limitations under the License.
|
|||
package machine
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine"
|
||||
"github.com/docker/machine/libmachine/host"
|
||||
"github.com/docker/machine/libmachine/mcnerror"
|
||||
|
|
@ -30,26 +32,36 @@ import (
|
|||
|
||||
// StopHost stops the host VM, saving state to disk.
|
||||
func StopHost(api libmachine.API, machineName string) error {
|
||||
host, err := api.Load(machineName)
|
||||
glog.Infof("StopHost: %v", machineName)
|
||||
h, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "load")
|
||||
}
|
||||
|
||||
out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName})
|
||||
if host.DriverName == driver.HyperV {
|
||||
out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": h.DriverName})
|
||||
return stop(h)
|
||||
}
|
||||
|
||||
// stop forcibly stops a host without needing to load
|
||||
func stop(h *host.Host) error {
|
||||
start := time.Now()
|
||||
if h.DriverName == driver.HyperV {
|
||||
glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH")
|
||||
if err := trySSHPowerOff(host); err != nil {
|
||||
if err := trySSHPowerOff(h); err != nil {
|
||||
return errors.Wrap(err, "ssh power off")
|
||||
}
|
||||
}
|
||||
|
||||
if err := host.Stop(); err != nil {
|
||||
alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState)
|
||||
if ok && alreadyInStateError.State == state.Stopped {
|
||||
if err := h.Stop(); err != nil {
|
||||
glog.Infof("stop err: %v", err)
|
||||
st, ok := err.(mcnerror.ErrHostAlreadyInState)
|
||||
if ok && st.State == state.Stopped {
|
||||
glog.Infof("host is already stopped")
|
||||
return nil
|
||||
}
|
||||
return &retry.RetriableError{Err: errors.Wrapf(err, "Stop: %s", machineName)}
|
||||
return &retry.RetriableError{Err: errors.Wrap(err, "stop")}
|
||||
}
|
||||
glog.Infof("stop complete within %s", time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -348,7 +348,8 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos
|
|||
out.T(out.Workaround, `Run: "{{.delete}}", then "{{.start}} --alsologtostderr -v=1" to try again with more logging`,
|
||||
out.V{"delete": mustload.ExampleCmd(cc.Name, "delete"), "start": mustload.ExampleCmd(cc.Name, "start")})
|
||||
|
||||
exit.WithError("Unable to start VM after repeated tries. Please try {{'minikube delete' if possible", err)
|
||||
drv := cc.Driver
|
||||
exit.WithError(fmt.Sprintf(`Failed to start %s %s. "%s" may fix it.`, drv, driver.MachineType(drv), mustload.ExampleCmd(cc.Name, "start")), err)
|
||||
return host, exists
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -60,7 +60,6 @@ var styles = map[StyleEnum]style{
|
|||
Running: {Prefix: "🏃 "},
|
||||
Provisioning: {Prefix: "🌱 "},
|
||||
Restarting: {Prefix: "🔄 "},
|
||||
Reconfiguring: {Prefix: "📯 "},
|
||||
Stopping: {Prefix: "✋ "},
|
||||
Stopped: {Prefix: "🛑 "},
|
||||
Warning: {Prefix: "❗ ", LowPrefix: lowWarning},
|
||||
|
|
@ -92,7 +91,7 @@ var styles = map[StyleEnum]style{
|
|||
Caching: {Prefix: "🤹 "},
|
||||
StartingVM: {Prefix: "🔥 "},
|
||||
StartingNone: {Prefix: "🤹 "},
|
||||
Provisioner: {Prefix: "ℹ️ "},
|
||||
Provisioner: {Prefix: "ℹ️ "},
|
||||
Resetting: {Prefix: "🔄 "},
|
||||
DeletingHost: {Prefix: "🔥 "},
|
||||
Copying: {Prefix: "✨ "},
|
||||
|
|
@ -117,7 +116,7 @@ var styles = map[StyleEnum]style{
|
|||
Unmount: {Prefix: "🔥 "},
|
||||
MountOptions: {Prefix: "💾 "},
|
||||
Fileserver: {Prefix: "🚀 ", OmitNewline: true},
|
||||
DryRun: {Prefix: "🏜️ "},
|
||||
DryRun: {Prefix: "🌵 "},
|
||||
AddonEnable: {Prefix: "🌟 "},
|
||||
AddonDisable: {Prefix: "🌑 "},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,7 +32,6 @@ const (
|
|||
Running
|
||||
Provisioning
|
||||
Restarting
|
||||
Reconfiguring
|
||||
Stopping
|
||||
Stopped
|
||||
Warning
|
||||
|
|
|
|||
|
|
@ -24,6 +24,40 @@ import (
|
|||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
// Podman is Kubernetes in container using podman driver
|
||||
Podman = "podman"
|
||||
// Docker is Kubernetes in container using docker driver
|
||||
Docker = "docker"
|
||||
// Mock driver
|
||||
Mock = "mock"
|
||||
// None driver
|
||||
None = "none"
|
||||
)
|
||||
|
||||
// IsKIC checks if the driver is a kubernetes in container
|
||||
func IsKIC(name string) bool {
|
||||
return name == Docker || name == Podman
|
||||
}
|
||||
|
||||
// IsMock checks if the driver is a mock
|
||||
func IsMock(name string) bool {
|
||||
return name == Mock
|
||||
}
|
||||
|
||||
// IsVM checks if the driver is a VM
|
||||
func IsVM(name string) bool {
|
||||
if IsKIC(name) || IsMock(name) || BareMetal(name) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// BareMetal returns if this driver is unisolated
|
||||
func BareMetal(name string) bool {
|
||||
return name == None || name == Mock
|
||||
}
|
||||
|
||||
var (
|
||||
// globalRegistry is a globally accessible driver registry
|
||||
globalRegistry = newRegistry()
|
||||
|
|
@ -59,7 +93,7 @@ func Driver(name string) DriverDef {
|
|||
}
|
||||
|
||||
// Available returns a list of available drivers in the global registry
|
||||
func Available() []DriverState {
|
||||
func Available(vm bool) []DriverState {
|
||||
sts := []DriverState{}
|
||||
glog.Infof("Querying for installed drivers using PATH=%s", os.Getenv("PATH"))
|
||||
|
||||
|
|
@ -76,7 +110,13 @@ func Available() []DriverState {
|
|||
priority = Unhealthy
|
||||
}
|
||||
|
||||
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
|
||||
if vm {
|
||||
if IsVM(d.Name) {
|
||||
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
|
||||
}
|
||||
} else {
|
||||
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
|
||||
}
|
||||
}
|
||||
|
||||
// Descending priority for predictability
|
||||
|
|
|
|||
|
|
@ -102,7 +102,7 @@ func TestGlobalAvailable(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(Available(), expected); diff != "" {
|
||||
if diff := cmp.Diff(Available(false), expected); diff != "" {
|
||||
t.Errorf("available mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package tests
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
|
|
@ -24,6 +25,7 @@ import (
|
|||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
)
|
||||
|
||||
// MockDriver is a struct used to mock out libmachine.Driver
|
||||
|
|
@ -96,11 +98,14 @@ func (d *MockDriver) GetSSHKeyPath() string {
|
|||
|
||||
// GetState returns the state of the driver
|
||||
func (d *MockDriver) GetState() (state.State, error) {
|
||||
d.Logf("MockDriver.GetState: %v", d.CurrentState)
|
||||
if d.NotExistError {
|
||||
_, file, no, _ := runtime.Caller(2)
|
||||
d.Logf("MockDriver.GetState called from %s#%d: returning %q", file, no, d.CurrentState)
|
||||
|
||||
// NOTE: this logic is questionable
|
||||
if d.NotExistError && d.CurrentState != state.Stopped && d.CurrentState != state.None {
|
||||
d.CurrentState = state.Error
|
||||
// don't use cluster.ErrorMachineNotExist to avoid import cycle
|
||||
return d.CurrentState, errors.New("machine does not exist")
|
||||
d.Logf("mock NotExistError set, setting state=%s err=%v", d.CurrentState, constants.ErrMachineMissing)
|
||||
return d.CurrentState, constants.ErrMachineMissing
|
||||
}
|
||||
return d.CurrentState, nil
|
||||
}
|
||||
|
|
@ -123,12 +128,13 @@ func (d *MockDriver) Remove() error {
|
|||
if d.RemoveError {
|
||||
return errors.New("error deleting machine")
|
||||
}
|
||||
d.NotExistError = false
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restart restarts the machine
|
||||
func (d *MockDriver) Restart() error {
|
||||
d.Logf("MockDriver.Restart")
|
||||
d.Logf("MockDriver.Restart, setting CurrentState=%s", state.Running)
|
||||
d.CurrentState = state.Running
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,8 @@ package translate
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudfoundry-attic/jibber_jabber"
|
||||
|
|
@ -73,11 +75,23 @@ func DetermineLocale() {
|
|||
}
|
||||
|
||||
// Load translations for preferred language into memory.
|
||||
translationFile := "translations/" + preferredLanguage.String() + ".json"
|
||||
p := preferredLanguage.String()
|
||||
translationFile := path.Join("translations", fmt.Sprintf("%s.json", p))
|
||||
t, err := Asset(translationFile)
|
||||
if err != nil {
|
||||
glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err)
|
||||
return
|
||||
// Attempt to find a more broad locale, e.g. fr instead of fr-FR.
|
||||
if strings.Contains(p, "-") {
|
||||
p = strings.Split(p, "-")[0]
|
||||
translationFile := path.Join("translations", fmt.Sprintf("%s.json", p))
|
||||
t, err = Asset(translationFile)
|
||||
if err != nil {
|
||||
glog.Infof("Failed to load translation file for %s: %v", p, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = json.Unmarshal(t, &Translations)
|
||||
|
|
|
|||
|
|
@ -29,18 +29,22 @@ import (
|
|||
"github.com/docker/machine/libmachine/provision/pkgaction"
|
||||
"github.com/docker/machine/libmachine/swarm"
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
||||
// BuildrootProvisioner provisions the custom system based on Buildroot
|
||||
type BuildrootProvisioner struct {
|
||||
provision.SystemdProvisioner
|
||||
clusterName string
|
||||
}
|
||||
|
||||
// NewBuildrootProvisioner creates a new BuildrootProvisioner
|
||||
func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner {
|
||||
return &BuildrootProvisioner{
|
||||
NewSystemdProvisioner("buildroot", d),
|
||||
viper.GetString(config.ProfileName),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -180,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions
|
|||
}
|
||||
|
||||
glog.Infof("setting minikube options for container-runtime")
|
||||
if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil {
|
||||
if err := setContainerRuntimeOptions(p.clusterName, p); err != nil {
|
||||
glog.Infof("Error setting container-runtime options during provisioning %v", err)
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,7 +39,6 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/sshutil"
|
||||
)
|
||||
|
||||
|
|
@ -209,8 +208,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options {
|
|||
}
|
||||
|
||||
func setContainerRuntimeOptions(name string, p miniProvisioner) error {
|
||||
cluster, _ := driver.ClusterNameFromMachine(name)
|
||||
c, err := config.Load(cluster)
|
||||
c, err := config.Load(name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting cluster config")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,6 +29,8 @@ import (
|
|||
"github.com/docker/machine/libmachine/provision/pkgaction"
|
||||
"github.com/docker/machine/libmachine/swarm"
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
||||
|
|
@ -42,6 +44,7 @@ func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner {
|
|||
return &UbuntuProvisioner{
|
||||
BuildrootProvisioner{
|
||||
NewSystemdProvisioner("ubuntu", d),
|
||||
viper.GetString(config.ProfileName),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -185,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au
|
|||
}
|
||||
|
||||
glog.Infof("setting minikube options for container-runtime")
|
||||
if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil {
|
||||
if err := setContainerRuntimeOptions(p.clusterName, p); err != nil {
|
||||
glog.Infof("Error setting container-runtime options during provisioning %v", err)
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ weight = 1
|
|||
[params]
|
||||
copyright = "The Kubernetes Authors -- "
|
||||
# The latest release of minikube
|
||||
latest_release = "1.8.1"
|
||||
latest_release = "1.9.0"
|
||||
|
||||
privacy_policy = ""
|
||||
|
||||
|
|
@ -112,7 +112,10 @@ github_project_repo = ""
|
|||
github_subdir = "site"
|
||||
|
||||
# Google Custom Search Engine ID. Remove or comment out to disable search.
|
||||
gcs_engine_id = "005331096405080631692:s7c4yfpw9sy"
|
||||
# gcs_engine_id = "005331096405080631692:s7c4yfpw9sy"
|
||||
|
||||
# enabling local search https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr
|
||||
offlineSearch = true
|
||||
|
||||
# User interface configuration
|
||||
[params.ui]
|
||||
|
|
|
|||
|
|
@ -83,12 +83,14 @@ A single command away from reproducing your production environment, from the com
|
|||
{{% /blocks/feature %}}
|
||||
|
||||
{{% blocks/feature icon="fa-thumbs-up" title="Cross-platform" %}}
|
||||
- Bare-metal
|
||||
- HyperKit
|
||||
- Hyper-V
|
||||
- KVM
|
||||
- Docker
|
||||
- HyperKit
|
||||
- Bare-metal
|
||||
- VirtualBox
|
||||
- Hyper-V
|
||||
- VMware
|
||||
- Podman
|
||||
{{% /blocks/feature %}}
|
||||
{{< /blocks/section >}}
|
||||
|
||||
|
|
|
|||
|
|
@ -8,14 +8,84 @@ description: >
|
|||
|
||||
All translations are stored in the top-level `translations` directory.
|
||||
|
||||
### Adding a New Language
|
||||
* Add a new json file in the translations directory with the locale code of the language you want to add
|
||||
translations for, e.g. fr for French.
|
||||
```
|
||||
~/minikube$ touch translations/fr.json
|
||||
~/minikube$ ls translations/
|
||||
de.json es.json fr.json ja.json ko.json pl.json zh-CN.json
|
||||
```
|
||||
* Run `make extract` from root to populate that file with the strings to translate in json
|
||||
form.
|
||||
```
|
||||
~/minikube$ make extract
|
||||
go run cmd/extract/extract.go
|
||||
Compiling translation strings...
|
||||
Writing to de.json
|
||||
Writing to es.json
|
||||
Writing to fr.json
|
||||
Writing to ja.json
|
||||
Writing to ko.json
|
||||
Writing to pl.json
|
||||
Writing to zh-CN.json
|
||||
Done!
|
||||
```
|
||||
* Add translated strings to the json file as the value of the map where the English phrase is the key.
|
||||
```
|
||||
~/minikube$ head translations/fr.json
|
||||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"'none' driver does not support 'minikube docker-env' command": "",
|
||||
"'none' driver does not support 'minikube mount' command": "",
|
||||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "",
|
||||
```
|
||||
* Add the translations as the values of the map, keeping in mind that anything in double braces `{{}}` are variable names describing what gets injected and should not be translated.
|
||||
```
|
||||
~/minikube$ vi translations/fr.json
|
||||
{
|
||||
[...]
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
"Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}",
|
||||
"Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternatives}}",
|
||||
"Available Commands": "",
|
||||
"Basic Commands:": "",
|
||||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
### Adding Translations To an Existing Language
|
||||
* Run `make extract` to make sure all strings are up to date
|
||||
* Add translated strings to the appropriate json files in the 'translations'
|
||||
directory.
|
||||
* Edit the appropriate json file in the 'translations' directory, in the same way as described above.
|
||||
|
||||
### Adding a New Language
|
||||
* Add a new json file with the locale code of the language you want to add
|
||||
translations for, e.g. en for English.
|
||||
* Run `make extract` to populate that file with the strings to translate in json
|
||||
form.
|
||||
* Add translations to as many strings as you'd like.
|
||||
### Testing translations
|
||||
* Once you have all the translations you want, save the file and rebuild the minikube from scratch to pick up your new translations:
|
||||
```
|
||||
~/minikube$ make clean
|
||||
rm -rf ./out
|
||||
rm -f pkg/minikube/assets/assets.go
|
||||
rm -f pkg/minikube/translate/translations.go
|
||||
rm -rf ./vendor
|
||||
~/minikube$ make
|
||||
```
|
||||
Note: the clean is required to regenerate the embedded `translations.go` file
|
||||
|
||||
* You now have a fresh minikube binary in the `out` directory. If your system locale is that of the language you added translations for, a simple `out/minikube start` will work as a test, assuming you translated phrases from `minikube start`. You can use whatever command you'd like in that way.
|
||||
|
||||
* If you have a different system locale, you can override the printed language using the LANG environment variable:
|
||||
```
|
||||
~/minikube$ LANG=fr out/minikube start
|
||||
😄 minikube v1.9.0-beta.2 sur Darwin 10.14.6
|
||||
✨ Choix automatique du driver hyperkit
|
||||
🔥 Création de VM hyperkit (CPUs=2, Mémoire=4000MB, Disque=20000MB)...
|
||||
🐳 Préparation de Kubernetes v1.18.0 sur Docker 19.03.8...
|
||||
🌟 Installation des addons: default-storageclass, storage-provisioner
|
||||
🏄 Terminé ! kubectl est maintenant configuré pour utiliser "minikube".
|
||||
```
|
||||
|
|
|
|||
|
|
@ -62,6 +62,7 @@ If the issue is specific to an operating system, hypervisor, container, addon, o
|
|||
- `co/kvm2`
|
||||
- `co/none-driver`
|
||||
- `co/docker-driver`
|
||||
- `co/podman-driver`
|
||||
- `co/virtualbox`
|
||||
|
||||
**co/[kubernetes component]** - When the issue appears specific to a k8s component
|
||||
|
|
|
|||
|
|
@ -4,28 +4,18 @@ linkTitle: "docker"
|
|||
weight: 3
|
||||
date: 2020-02-05
|
||||
description: >
|
||||
Docker driver (EXPERIMENTAL)
|
||||
Docker driver
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The Docker driver is an experimental VM-free driver that ships with minikube v1.7.
|
||||
The Docker driver is the newest minikube driver. which runs kubernetes in container VM-free ! with full feature parity with minikube in VM.
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
|
||||
|
||||
This driver was inspired by the [kind project](https://kind.sigs.k8s.io/), and uses a modified version of its base image.
|
||||
|
||||
## Special features
|
||||
- Cross platform (linux, macos, windows)
|
||||
- No hypervisor required when run on Linux.
|
||||
|
||||
No hypervisor required when run on Linux.
|
||||
|
||||
## Limitations
|
||||
|
||||
As an experimental driver, not all commands are supported on all platforms. Notably: `mount,` `service`, `tunnel`, and others. Most of these limitations will be addressed by minikube v1.8 (March 2020)
|
||||
|
||||
## Issues
|
||||
|
||||
* [Full list of open 'kic-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fkic-driver)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
* Run `minikube start --alsologtostderr -v=1` to debug crashes
|
||||
* If your docker is too slow on mac os try [Improving docker performance](https://docs.docker.com/docker-for-mac/osxfs-caching/)
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ date: 2018-08-05
|
|||
description: >
|
||||
Microsoft Hyper-V driver
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,7 @@
|
|||
To use baremetal driver (none driver). verify that your operating system is Linux and also have 'systemd' installed.
|
||||
|
||||
```shell
|
||||
pidof systemd && echo "yes" || echo "no"
|
||||
```
|
||||
If the above command outputs "no":
|
||||
Your system is not suitable for none driver.
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
To use VM drivers, verify that your system has virtualization support enabled:
|
||||
|
||||
```shell
|
||||
egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no
|
||||
```
|
||||
|
||||
If the above command outputs "no":
|
||||
|
||||
- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver
|
||||
- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled
|
||||
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
To check if virtualization is supported, run the following command on your Windows terminal or command prompt.
|
||||
|
||||
```shell
|
||||
systeminfo
|
||||
```
|
||||
If you see the following output, virtualization is supported:
|
||||
|
||||
```shell
|
||||
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
|
||||
Virtualization Enabled In Firmware: Yes
|
||||
Second Level Address Translation: Yes
|
||||
Data Execution Prevention Available: Yes
|
||||
```
|
||||
|
||||
If you see the following output, your system already has a Hypervisor installed and you can skip the next step.
|
||||
|
||||
```shell
|
||||
Hyper-V Requirements: A hypervisor has been detected.
|
||||
```
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
## Install Docker
|
||||
|
||||
- [Docker Desktop](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc)
|
||||
|
||||
## Usage
|
||||
|
||||
Start a cluster using the docker driver:
|
||||
|
||||
```shell
|
||||
minikube start --driver=docker
|
||||
```
|
||||
To make docker the default driver:
|
||||
|
||||
```shell
|
||||
minikube config set driver docker
|
||||
```
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
## experimental
|
||||
|
||||
This is an experimental driver. please use it only for experimental reasons.
|
||||
for a better kubernetes in container experience, use docker [driver](https://minikube.sigs.k8s.io/docs/reference/drivers/docker).
|
||||
|
||||
## Install Podman
|
||||
|
||||
- [Podman](https://podman.io/getting-started/installation.html)
|
||||
|
||||
## Usage
|
||||
|
||||
Start a cluster using the docker driver:
|
||||
|
||||
```shell
|
||||
minikube start --driver=podman
|
||||
```
|
||||
To make docker the default driver:
|
||||
|
||||
```shell
|
||||
minikube config set driver podman
|
||||
```
|
||||
|
|
@ -8,12 +8,17 @@ description: >
|
|||
Linux KVM (Kernel-based Virtual Machine) driver
|
||||
---
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
[KVM (Kernel-based Virtual Machine)](https://www.linux-kvm.org/page/Main_Page) is a full virtualization solution for Linux on x86 hardware containing virtualization extensions. To work with KVM, minikube uses the [libvirt virtualization API](https://libvirt.org/)
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}}
|
||||
|
||||
## Check virtualization support
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
|
||||
|
||||
## Special features
|
||||
|
||||
The `minikube start` command supports 3 additional kvm specific flags:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
title: "podman"
|
||||
linkTitle: "podman"
|
||||
weight: 3
|
||||
date: 2020-03-26
|
||||
description: >
|
||||
Podman driver
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker/) driver.
|
||||
podman driver is currently experimental.
|
||||
and only supported on Linux and MacOs (with a remote podman server)
|
||||
|
||||
|
||||
## Try it with CRI-O container runtime.
|
||||
```shell
|
||||
minikube start --driver=podman --container-runtime=cri-o
|
||||
```
|
||||
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}
|
||||
|
||||
|
||||
|
||||
|
|
@ -4,7 +4,7 @@ linkTitle: "Disk cache"
|
|||
weight: 6
|
||||
date: 2019-08-01
|
||||
description: >
|
||||
Cache Rules Everything Around Minikube
|
||||
Cache Rules Everything Around minikube
|
||||
---
|
||||
|
||||
minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations:
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ description: >
|
|||
About persistent volumes (hostPath)
|
||||
---
|
||||
|
||||
minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below.
|
||||
minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below.
|
||||
|
||||
## A note on mounts, persistence, and minikube hosts
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
### Getting to know Kubernetes
|
||||
## Getting to know Kubernetes
|
||||
|
||||
Once started, you can use any regular Kubernetes command to interact with your minikube cluster. For example, you can see the pod states by running:
|
||||
|
||||
|
|
@ -6,16 +6,16 @@ Once started, you can use any regular Kubernetes command to interact with your m
|
|||
kubectl get po -A
|
||||
```
|
||||
|
||||
### Increasing memory allocation
|
||||
## Increasing memory allocation
|
||||
|
||||
minikube only allocates 2GB of RAM by default, which is only enough for trivial deployments. For larger
|
||||
minikube auto-selects the memory size based on your system up to 6000mb. For larger
|
||||
deployments, increase the memory allocation using the `--memory` flag, or make the setting persistent using:
|
||||
|
||||
```shell
|
||||
minikube config set memory 4096
|
||||
minikube config set memory 8096
|
||||
```
|
||||
|
||||
### Where to go next?
|
||||
## Where to go next?
|
||||
|
||||
Visit the [examples](/docs/examples) page to get an idea of what you can do with minikube.
|
||||
|
||||
|
|
|
|||
|
|
@ -39,32 +39,39 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la
|
|||
{{% /tab %}}
|
||||
{{% /tabs %}}
|
||||
|
||||
## Hypervisor Setup
|
||||
|
||||
Verify that your system has virtualization support enabled:
|
||||
|
||||
```shell
|
||||
egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no
|
||||
```
|
||||
|
||||
If the above command outputs "no":
|
||||
|
||||
- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver
|
||||
- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled
|
||||
## Driver Setup
|
||||
|
||||
{{% tabs %}}
|
||||
{{% tab "Docker" %}}
|
||||
## Check container support
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab "KVM" %}}
|
||||
## Check virtualization support
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "VirtualBox" %}}
|
||||
## Check virtualization support
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "None (bare-metal)" %}}
|
||||
## Check baremetal support
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_baremetal_linux.inc" %}}
|
||||
|
||||
If you are already running minikube from inside a VM, it is possible to skip the creation of an additional VM layer by using the `none` driver.
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "Podman (experimental)" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
|
||||
|
||||
{{% /tabs %}}
|
||||
|
||||
{{% readfile file="/docs/Start/includes/post_install.inc" %}}
|
||||
|
|
|
|||
|
|
@ -50,6 +50,9 @@ brew upgrade minikube
|
|||
## Hypervisor Setup
|
||||
|
||||
{{% tabs %}}
|
||||
{{% tab "Docker" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "Hyperkit" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
|
|
@ -62,6 +65,9 @@ brew upgrade minikube
|
|||
{{% tab "VMware" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "Podman (experimental)" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
|
||||
{{% /tabs %}}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,8 +7,6 @@ weight: 3
|
|||
### Prerequisites
|
||||
|
||||
* Windows 8 or above
|
||||
* A hypervisor, such as Hyper-V or VirtualBox
|
||||
* Hardware virtualization support must be enabled in BIOS
|
||||
* 4GB of RAM
|
||||
|
||||
### Installation
|
||||
|
|
@ -30,33 +28,23 @@ After it has installed, close the current CLI session and reopen it. minikube sh
|
|||
{{% /tab %}}
|
||||
{{% /tabs %}}
|
||||
|
||||
## Hypervisor Setup
|
||||
|
||||
To check if virtualization is supported, run the following command on your Windows terminal or command prompt.
|
||||
|
||||
```shell
|
||||
systeminfo
|
||||
```
|
||||
If you see the following output, virtualization is supported:
|
||||
|
||||
```shell
|
||||
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
|
||||
Virtualization Enabled In Firmware: Yes
|
||||
Second Level Address Translation: Yes
|
||||
Data Execution Prevention Available: Yes
|
||||
```
|
||||
|
||||
If you see the following output, your system already has a Hypervisor installed and you can skip the next step.
|
||||
|
||||
```shell
|
||||
Hyper-V Requirements: A hypervisor has been detected.
|
||||
```
|
||||
|
||||
{{% tabs %}}
|
||||
{{% tab "Docker" %}}
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
|
||||
{{% tab "Hyper-V" %}}
|
||||
## Check Hypervisor
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}}
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% tab "VirtualBox" %}}
|
||||
## Check Hypervisor
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}}
|
||||
|
||||
{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}}
|
||||
{{% /tab %}}
|
||||
{{% /tabs %}}
|
||||
|
|
|
|||
|
|
@ -9,13 +9,13 @@ description: >
|
|||
|
||||
## Overview
|
||||
|
||||
Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case.
|
||||
Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. or you could alternatively use the [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- VM running a systemd based Linux distribution
|
||||
|
||||
## Tutorial
|
||||
## using none driver
|
||||
|
||||
Here is an example, that runs minikube from a non-root user, and ensures that the latest stable kubectl is installed:
|
||||
|
||||
|
|
@ -39,3 +39,7 @@ touch $KUBECONFIG
|
|||
|
||||
sudo -E minikube start --driver=none
|
||||
```
|
||||
|
||||
## Alternative ways
|
||||
|
||||
you could alternatively use minikube's container drivers such as [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker) or [Podman](https://minikube.sigs.k8s.io/docs/reference/drivers/podman).
|
||||
|
|
@ -98,7 +98,7 @@ to expose GPUs with `--driver=kvm2`. Please don't mix these instructions.
|
|||
|
||||
## Why does minikube not support NVIDIA GPUs on macOS?
|
||||
|
||||
VM drivers supported by minikube for macOS doesn't support GPU passthrough:
|
||||
drivers supported by minikube for macOS doesn't support GPU passthrough:
|
||||
|
||||
- [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108)
|
||||
- [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159)
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ description: >
|
|||
|
||||
Most organizations deploy their own Root Certificate and CA service inside the corporate networks.
|
||||
Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns.
|
||||
You may install the Root Certificate into the minikube VM to access these corporate resources within the cluster.
|
||||
You may install the Root Certificate into the minikube cluster to access these corporate resources within the cluster.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
|
@ -26,13 +26,13 @@ You may install the Root Certificate into the minikube VM to access these corpor
|
|||
openssl x509 -inform der -in my_company.cer -out my_company.pem
|
||||
```
|
||||
|
||||
* You may need to delete existing minikube VM
|
||||
* You may need to delete existing minikube cluster
|
||||
|
||||
```shell
|
||||
minikube delete
|
||||
```
|
||||
|
||||
* Copy the certificate before creating the minikube VM
|
||||
* Copy the certificate before creating the minikube cluster
|
||||
|
||||
```shell
|
||||
mkdir -p $HOME/.minikube/certs
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
Subproject commit 493bb1a0af92d1242f8396aeb1661dcd3a010db7
|
||||
Subproject commit 3123298f5b0f56b3315b55319e17a8fa6c9d98f9
|
||||
|
|
@ -67,22 +67,22 @@ func TestDownloadOnly(t *testing.T) {
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", args, err)
|
||||
t.Errorf("failed to download only. args: %q %v", args, err)
|
||||
}
|
||||
|
||||
// skip for none, as none driver does not have preload feature.
|
||||
if !NoneDriver() {
|
||||
if download.PreloadExists(v, r) {
|
||||
// Just make sure the tarball path exists
|
||||
if _, err := os.Stat(download.TarballPath(v)); err != nil {
|
||||
t.Errorf("preloaded tarball path doesn't exist: %v", err)
|
||||
if _, err := os.Stat(download.TarballPath(v, r)); err != nil {
|
||||
t.Errorf("failed to verify preloaded tarball file exists: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
imgs, err := images.Kubeadm("", v)
|
||||
if err != nil {
|
||||
t.Errorf("kubeadm images: %v %+v", v, err)
|
||||
t.Errorf("failed to get kubeadm images for %v: %+v", v, err)
|
||||
}
|
||||
|
||||
// skip verify for cache images if --driver=none
|
||||
|
|
@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) {
|
|||
}
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err)
|
||||
}
|
||||
})
|
||||
// Delete should always succeed, even if previously partially or fully deleted.
|
||||
|
|
@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) {
|
|||
}
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to delete. args: %q: %v", rr.Command(), err)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
|
@ -154,26 +154,28 @@ func TestDownloadOnlyKic(t *testing.T) {
|
|||
ctx, cancel := context.WithTimeout(context.Background(), Minutes(15))
|
||||
defer Cleanup(t, profile, cancel)
|
||||
|
||||
cRuntime := "docker"
|
||||
|
||||
args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"}
|
||||
args = append(args, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v:\n%s", args, err, rr.Output())
|
||||
|
||||
if _, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err != nil {
|
||||
t.Errorf("start with download only failed %q : %v", args, err)
|
||||
}
|
||||
|
||||
// Make sure the downloaded image tarball exists
|
||||
tarball := download.TarballPath(constants.DefaultKubernetesVersion)
|
||||
tarball := download.TarballPath(constants.DefaultKubernetesVersion, cRuntime)
|
||||
contents, err := ioutil.ReadFile(tarball)
|
||||
if err != nil {
|
||||
t.Errorf("reading tarball: %v", err)
|
||||
t.Errorf("failed to read tarball file %q: %v", tarball, err)
|
||||
}
|
||||
// Make sure it has the correct checksum
|
||||
checksum := md5.Sum(contents)
|
||||
remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion))
|
||||
remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime))
|
||||
if err != nil {
|
||||
t.Errorf("reading checksum file: %v", err)
|
||||
t.Errorf("failed to read checksum file %q : %v", download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime), err)
|
||||
}
|
||||
if string(remoteChecksum) != string(checksum[:]) {
|
||||
t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:]))
|
||||
t.Errorf("failed to verify checksum. checksum of %q does not match remote checksum (%q != %q)", tarball, string(remoteChecksum), string(checksum[:]))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ func TestOffline(t *testing.T) {
|
|||
rr, err := Run(t, c)
|
||||
if err != nil {
|
||||
// Fatal so that we may collect logs before stop/delete steps
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ func TestAddons(t *testing.T) {
|
|||
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Parallelized tests
|
||||
|
|
@ -69,15 +69,15 @@ func TestAddons(t *testing.T) {
|
|||
// Assert that disable/enable works offline
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to stop minikube. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -88,30 +88,30 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("kubernetes client: %v", client)
|
||||
t.Fatalf("failed to get kubernetes client: %v", client)
|
||||
}
|
||||
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", Minutes(6)); err != nil {
|
||||
t.Errorf("waiting for ingress-controller deployment to stabilize: %v", err)
|
||||
t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "app.kubernetes.io/name=nginx-ingress-controller", Minutes(12)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waititing for nginx-ingress-controller : %v", err)
|
||||
}
|
||||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml")))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml")))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for ngnix pod: %v", err)
|
||||
}
|
||||
if err := kapi.WaitForService(client, "default", "nginx", true, time.Millisecond*500, Minutes(10)); err != nil {
|
||||
t.Errorf("Error waiting for nginx service to be up")
|
||||
t.Errorf("failed waiting for nginx service to be up: %v", err)
|
||||
}
|
||||
|
||||
want := "Welcome to nginx!"
|
||||
|
|
@ -121,65 +121,65 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
|
|||
return err
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
|
||||
t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), want) {
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil {
|
||||
t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err)
|
||||
t.Errorf("failed to get response from ngninx ingress on 127.0.0.1:80: %v", err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to disable ingress addon. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) {
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("kubernetes client: %v", client)
|
||||
t.Fatalf("failed to get kubernetes client for %s : %v", profile, err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
if err := kapi.WaitForRCToStabilize(client, "kube-system", "registry", Minutes(6)); err != nil {
|
||||
t.Errorf("waiting for registry replicacontroller to stabilize: %v", err)
|
||||
t.Errorf("failed waiting for registry replicacontroller to stabilize: %v", err)
|
||||
}
|
||||
t.Logf("registry stabilized in %s", time.Since(start))
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "actual-registry=true", Minutes(6)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for pod actual-registry: %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "registry-proxy=true", Minutes(10)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for pod registry-proxy: %v", err)
|
||||
}
|
||||
|
||||
// Test from inside the cluster (no curl available on busybox)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "po", "-l", "run=registry-test", "--now"))
|
||||
if err != nil {
|
||||
t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Args, err)
|
||||
t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Command(), err)
|
||||
}
|
||||
want := "HTTP/1.1 200"
|
||||
if !strings.Contains(rr.Stdout.String(), want) {
|
||||
t.Errorf("curl = %q, want *%s*", rr.Stdout.String(), want)
|
||||
t.Errorf("expected curl response be %q, but got *%s*", want, rr.Stdout.String())
|
||||
}
|
||||
|
||||
// Test from outside the cluster
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed run minikube ip. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Errorf("%s: unexpected stderr: %s", rr.Args, rr.Stderr)
|
||||
t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Command())
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000)
|
||||
|
|
@ -199,30 +199,30 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) {
|
|||
return nil
|
||||
}
|
||||
|
||||
if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Minutes(2)); err != nil {
|
||||
t.Errorf(err.Error())
|
||||
if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Seconds(150)); err != nil {
|
||||
t.Errorf("failed to check external access to %s: %v", u.String(), err.Error())
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to disable registry addon. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) {
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("kubernetes client: %v", client)
|
||||
t.Fatalf("failed to get kubernetes client for %s: %v", profile, err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "metrics-server", Minutes(6)); err != nil {
|
||||
t.Errorf("waiting for metrics-server deployment to stabilize: %v", err)
|
||||
t.Errorf("failed waiting for metrics-server deployment to stabilize: %v", err)
|
||||
}
|
||||
t.Logf("metrics-server stabilized in %s", time.Since(start))
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "k8s-app=metrics-server", Minutes(6)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for k8s-app=metrics-server pod: %v", err)
|
||||
}
|
||||
|
||||
want := "CPU(cores)"
|
||||
|
|
@ -232,63 +232,71 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin
|
|||
return err
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), want) {
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// metrics-server takes some time to be able to collect metrics
|
||||
if err := retry.Expo(checkMetricsServer, time.Second*3, Minutes(6)); err != nil {
|
||||
t.Errorf(err.Error())
|
||||
t.Errorf("failed checking metric server: %v", err.Error())
|
||||
}
|
||||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) {
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("kubernetes client: %v", client)
|
||||
t.Fatalf("failed to get kubernetes client for %s: %v", profile, err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil {
|
||||
t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err)
|
||||
t.Errorf("failed waiting for tiller-deploy deployment to stabilize: %v", err)
|
||||
}
|
||||
t.Logf("tiller-deploy stabilized in %s", time.Since(start))
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for helm pod: %v", err)
|
||||
}
|
||||
|
||||
if NoneDriver() {
|
||||
_, err := exec.LookPath("socat")
|
||||
if err != nil {
|
||||
t.Skipf("socat is required by kubectl to complete this test")
|
||||
}
|
||||
}
|
||||
|
||||
want := "Server: &version.Version"
|
||||
// Test from inside the cluster (`helm version` use pod.list permission. we use tiller serviceaccount in kube-system to list pod)
|
||||
checkHelmTiller := func() error {
|
||||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), want) {
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil {
|
||||
t.Errorf(err.Error())
|
||||
t.Errorf("failed checking helm tiller: %v", err.Error())
|
||||
}
|
||||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed disabling helm-tiller addon. arg %q.s %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,27 +39,27 @@ func TestDockerFlags(t *testing.T) {
|
|||
args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} {
|
||||
if !strings.Contains(rr.Stdout.String(), envVar) {
|
||||
t.Errorf("env var %s missing: %s.", envVar, rr.Stdout)
|
||||
t.Errorf("expected env key/value %q to be passed to minikube's docker and be included in: *%q*.", envVar, rr.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
for _, opt := range []string{"--debug", "--icc=true"} {
|
||||
if !strings.Contains(rr.Stdout.String(), opt) {
|
||||
t.Fatalf("%s = %q, want *%s*", rr.Command(), rr.Stdout, opt)
|
||||
t.Fatalf("expected %q output to have include *%s* . output: %q", rr.Command(), opt, rr.Stdout)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -66,10 +66,10 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
t.Logf("%s failed, getting debug info...", t.Name())
|
||||
t.Logf("%q failed, getting debug info...", t.Name())
|
||||
rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates"))
|
||||
if err != nil {
|
||||
t.Logf("%s: %v", rr.Command(), err)
|
||||
t.Logf("debugging command %q failed : %v", rr.Command(), err)
|
||||
} else {
|
||||
t.Logf("(debug) %s:\n%s", rr.Command(), rr.Stdout)
|
||||
}
|
||||
|
|
@ -78,7 +78,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// Cleanup in advance of future tests
|
||||
rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo umount -f /mount-9p"))
|
||||
if err != nil {
|
||||
t.Logf("%s: %v", rr.Command(), err)
|
||||
t.Logf("%q: %v", rr.Command(), err)
|
||||
}
|
||||
ss.Stop(t)
|
||||
cancel()
|
||||
|
|
@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// Assert that we can access the mount without an error. Display for debugging.
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
t.Logf("guest mount directory contents\n%s", rr.Stdout)
|
||||
|
||||
|
|
@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
tp := filepath.Join("/mount-9p", testMarker)
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) {
|
||||
|
|
@ -136,28 +136,28 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// Start the "busybox-mount" pod.
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for busybox-mount pod: %v", err)
|
||||
}
|
||||
|
||||
// Read the file written by pod startup
|
||||
p := filepath.Join(tempDir, createdByPod)
|
||||
got, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
t.Errorf("readfile %s: %v", p, err)
|
||||
t.Errorf("failed to read file created by pod %q: %v", p, err)
|
||||
}
|
||||
wantFromPod := []byte("test\n")
|
||||
if !bytes.Equal(got, wantFromPod) {
|
||||
t.Errorf("%s = %q, want %q", p, got, wantFromPod)
|
||||
t.Errorf("the content of the file %q is %q, but want it to be: *%q*", p, got, wantFromPod)
|
||||
}
|
||||
|
||||
// test that file written from host was read in by the pod via cat /mount-9p/written-by-host;
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) {
|
||||
t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest)
|
||||
|
|
@ -169,27 +169,27 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// test that file written from host was read in by the pod via cat /mount-9p/fromhost;
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Command(), err)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.Contains(rr.Stdout.String(), "Access: 1970-01-01") {
|
||||
t.Errorf("invalid access time: %v", rr.Stdout)
|
||||
t.Errorf("expected to get valid access time but got: %q", rr.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
if strings.Contains(rr.Stdout.String(), "Modify: 1970-01-01") {
|
||||
t.Errorf("invalid modify time: %v", rr.Stdout)
|
||||
t.Errorf("expected to get valid modify time but got: %q", rr.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
p = filepath.Join(tempDir, createdByTestRemovedByPod)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
t.Errorf("expected file %s to be removed", p)
|
||||
t.Errorf("expected file %q to be removed but exists !", p)
|
||||
}
|
||||
|
||||
p = filepath.Join(tempDir, createdByPodRemovedByTest)
|
||||
if err := os.Remove(p); err != nil {
|
||||
t.Errorf("unexpected error removing file %s: %v", p, err)
|
||||
t.Errorf("failed to remove file %q: %v", p, err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
|
|||
defer cancel()
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "integration-test=storage-provisioner", Minutes(4)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for storage-provisioner: %v", err)
|
||||
}
|
||||
|
||||
checkStorageClass := func() error {
|
||||
|
|
@ -58,13 +58,13 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
|
|||
|
||||
// Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound
|
||||
if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil {
|
||||
t.Errorf("no default storage class after retry: %v", err)
|
||||
t.Errorf("failed to check for storage class: %v", err)
|
||||
}
|
||||
|
||||
// Now create a testpvc
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
checkStoragePhase := func() error {
|
||||
|
|
@ -84,6 +84,6 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
|
|||
}
|
||||
|
||||
if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil {
|
||||
t.Fatalf("PV Creation failed with error: %v", err)
|
||||
t.Fatalf("failed to check storage phase: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("client: %v", err)
|
||||
t.Fatalf("failed to get kubernetes client for %q: %v", profile, err)
|
||||
}
|
||||
|
||||
// Pre-Cleanup
|
||||
|
|
@ -62,14 +62,14 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"}
|
||||
ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", args, err)
|
||||
t.Errorf("failed to start a tunnel: args %q: %v", args, err)
|
||||
}
|
||||
defer ss.Stop(t)
|
||||
|
||||
// Start the "nginx" pod.
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
|
|
@ -97,9 +97,9 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
t.Logf("kubectl get svc nginx-svc:\n%s", rr.Stdout)
|
||||
t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout)
|
||||
}
|
||||
|
||||
got := []byte{}
|
||||
|
|
@ -120,11 +120,11 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
return nil
|
||||
}
|
||||
if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil {
|
||||
t.Errorf("failed to contact nginx at %s: %v", nginxIP, err)
|
||||
t.Errorf("failed to hit nginx at %q: %v", nginxIP, err)
|
||||
}
|
||||
|
||||
want := "Welcome to nginx!"
|
||||
if !strings.Contains(string(got), want) {
|
||||
t.Errorf("body = %q, want *%s*", got, want)
|
||||
t.Errorf("expected body to contain %q, but got *%q*", want, got)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -63,11 +63,11 @@ func TestFunctional(t *testing.T) {
|
|||
}
|
||||
p := localSyncTestPath()
|
||||
if err := os.Remove(p); err != nil {
|
||||
t.Logf("unable to remove %s: %v", p, err)
|
||||
t.Logf("unable to remove %q: %v", p, err)
|
||||
}
|
||||
p = localTestCertPath()
|
||||
if err := os.Remove(p); err != nil {
|
||||
t.Logf("unable to remove %s: %v", p, err)
|
||||
t.Logf("unable to remove %q: %v", p, err)
|
||||
}
|
||||
CleanupWithLogs(t, profile, cancel)
|
||||
}()
|
||||
|
|
@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) {
|
|||
func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
|
||||
}
|
||||
expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
|
||||
for _, el := range expectedLabels {
|
||||
|
|
@ -155,10 +155,10 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
|
|||
c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile)
|
||||
rr, err := Run(t, c)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to do minikube status after eval-ing docker-env %s", err)
|
||||
t.Fatalf("failed to do minikube status after eval-ing docker-env %s", err)
|
||||
}
|
||||
if !strings.Contains(rr.Output(), "Running") {
|
||||
t.Fatalf("Expected status output to include 'Running' after eval docker-env but got \n%s", rr.Output())
|
||||
t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%q*", rr.Output())
|
||||
}
|
||||
|
||||
mctx, cancel = context.WithTimeout(ctx, Seconds(13))
|
||||
|
|
@ -167,12 +167,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
|
|||
c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images")
|
||||
rr, err = Run(t, c)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to test eval docker-evn %s", err)
|
||||
t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err)
|
||||
}
|
||||
|
||||
expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner"
|
||||
if !strings.Contains(rr.Output(), expectedImgInside) {
|
||||
t.Fatalf("Expected 'docker ps' to have %q from docker-daemon inside minikube. the docker ps output is:\n%q\n", expectedImgInside, rr.Output())
|
||||
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%q*", expectedImgInside, rr.Output())
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -180,11 +180,11 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
|
||||
srv, err := startHTTPProxy(t)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to set up the test proxy: %s", err)
|
||||
t.Fatalf("failed to set up the test proxy: %s", err)
|
||||
}
|
||||
|
||||
// Use more memory so that we may reliably fit MySQL and nginx
|
||||
startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...)
|
||||
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
|
||||
c := exec.CommandContext(ctx, Target(), startArgs...)
|
||||
env := os.Environ()
|
||||
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
|
||||
|
|
@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
|
|||
c.Env = env
|
||||
rr, err := Run(t, c)
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed minikube start. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
want := "Found network options:"
|
||||
|
|
@ -210,10 +210,10 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to get current-context. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), profile) {
|
||||
t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile)
|
||||
t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -221,22 +221,23 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr)
|
||||
t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command())
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), "kube-system") {
|
||||
t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout)
|
||||
t.Errorf("expected stdout to include *kube-system* but got *%q*. args: %q", rr.Stdout, rr.Command())
|
||||
}
|
||||
}
|
||||
|
||||
// validateMinikubeKubectl validates that the `minikube kubectl` command returns content
|
||||
func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) {
|
||||
kubectlArgs := []string{"kubectl", "--", "get", "pods"}
|
||||
// Must set the profile so that it knows what version of Kubernetes to use
|
||||
kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"}
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to get pods. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -244,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string)
|
|||
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to get components. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
cs := api.ComponentStatusList{}
|
||||
d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
|
||||
if err := d.Decode(&cs); err != nil {
|
||||
t.Fatalf("decode: %v", err)
|
||||
t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
for _, i := range cs.Items {
|
||||
|
|
@ -269,40 +270,41 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string)
|
|||
func validateStatusCmd(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Custom format
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String())
|
||||
re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`
|
||||
match, _ := regexp.MatchString(re, rr.Stdout.String())
|
||||
if !match {
|
||||
t.Errorf("%s failed: %v. Output for custom format did not match", rr.Args, err)
|
||||
t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Command(), rr.Output())
|
||||
}
|
||||
|
||||
// Json output
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
var jsonObject map[string]interface{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err)
|
||||
}
|
||||
if _, ok := jsonObject["Host"]; !ok {
|
||||
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Host")
|
||||
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Host")
|
||||
}
|
||||
if _, ok := jsonObject["Kubelet"]; !ok {
|
||||
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet")
|
||||
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubelet")
|
||||
}
|
||||
if _, ok := jsonObject["APIServer"]; !ok {
|
||||
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "APIServer")
|
||||
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "APIServer")
|
||||
}
|
||||
if _, ok := jsonObject["Kubeconfig"]; !ok {
|
||||
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig")
|
||||
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubeconfig")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -311,7 +313,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"}
|
||||
ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", args, err)
|
||||
t.Errorf("failed to run minikube dashboard. args %q : %v", args, err)
|
||||
}
|
||||
defer func() {
|
||||
ss.Stop(t)
|
||||
|
|
@ -333,12 +335,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
resp, err := retryablehttp.Get(u.String())
|
||||
if err != nil {
|
||||
t.Errorf("failed get: %v", err)
|
||||
t.Fatalf("failed to http get %q : %v", u.String(), err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to read http response body: %v", err)
|
||||
t.Errorf("failed to read http response body from dashboard %q: %v", u.String(), err)
|
||||
}
|
||||
t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body)
|
||||
}
|
||||
|
|
@ -348,12 +350,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateDNS(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4))
|
||||
if err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for busybox pod : %v", err)
|
||||
}
|
||||
|
||||
nslookup := func() error {
|
||||
|
|
@ -363,12 +365,12 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
// If the coredns process was stable, this retry wouldn't be necessary.
|
||||
if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil {
|
||||
t.Errorf("nslookup failing: %v", err)
|
||||
t.Errorf("failed to do nslookup on kubernetes.default: %v", err)
|
||||
}
|
||||
|
||||
want := []byte("10.96.0.1")
|
||||
if !bytes.Contains(rr.Stdout.Bytes(), want) {
|
||||
t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
|
||||
t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -406,29 +408,29 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Run("cache", func(t *testing.T) {
|
||||
t.Run("add", func(t *testing.T) {
|
||||
for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} {
|
||||
_, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to cache image %q", img)
|
||||
t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) {
|
||||
_, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err)
|
||||
t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("list", func(t *testing.T) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list"))
|
||||
if err != nil {
|
||||
t.Errorf("cache list failed: %v", err)
|
||||
t.Errorf("failed to do cache list. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") {
|
||||
t.Errorf("cache list did not include k8s.gcr.io/pause")
|
||||
t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output())
|
||||
}
|
||||
if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") {
|
||||
t.Errorf("cache list should not include busybox:1.28.4-glibc")
|
||||
t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%q***", rr.Output())
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -438,7 +440,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Errorf("failed to get images by %q ssh %v", rr.Command(), err)
|
||||
}
|
||||
if !strings.Contains(rr.Output(), "1.28.4-glibc") {
|
||||
t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output())
|
||||
t.Errorf("expected '1.28.4-glibc' to be in the output but got %q", rr.Output())
|
||||
}
|
||||
|
||||
})
|
||||
|
|
@ -453,17 +455,17 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// make sure the image is deleted.
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
|
||||
if err == nil {
|
||||
t.Errorf("expected the image be deleted and get error but got nil error ! cmd: %q", rr.Command())
|
||||
t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command())
|
||||
}
|
||||
// minikube cache reload.
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload"))
|
||||
if err != nil {
|
||||
t.Errorf("expected %q to run successfully but got error %v", rr.Command(), err)
|
||||
t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err)
|
||||
}
|
||||
// make sure 'cache reload' brought back the manually deleted image.
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
|
||||
if err != nil {
|
||||
t.Errorf("expected to get no error for %q but got %v", rr.Command(), err)
|
||||
t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err)
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -489,16 +491,16 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
args := append([]string{"-p", profile, "config"}, tc.args...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil && tc.wantErr == "" {
|
||||
t.Errorf("unexpected failure: %s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to config minikube. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
got := strings.TrimSpace(rr.Stdout.String())
|
||||
if got != tc.wantOut {
|
||||
t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut)
|
||||
t.Errorf("expected config output for %q to be -%q- but got *%q*", rr.Command(), tc.wantOut, got)
|
||||
}
|
||||
got = strings.TrimSpace(rr.Stderr.String())
|
||||
if got != tc.wantErr {
|
||||
t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr)
|
||||
t.Errorf("expected config error for %q to be -%q- but got *%q*", rr.Command(), tc.wantErr, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -507,11 +509,11 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateLogsCmd(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} {
|
||||
if !strings.Contains(rr.Stdout.String(), word) {
|
||||
t.Errorf("minikube logs missing expected word: %q", word)
|
||||
t.Errorf("excpeted minikube logs to include word: -%q- but got \n***%q***\n", word, rr.Output())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -523,16 +525,16 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
nonexistentProfile := "lis"
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
var profileJSON map[string][]map[string]interface{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON)
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
for profileK := range profileJSON {
|
||||
for _, p := range profileJSON[profileK] {
|
||||
|
|
@ -548,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// List profiles
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to list profiles: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Table output
|
||||
|
|
@ -562,21 +564,20 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
}
|
||||
}
|
||||
if !profileExists {
|
||||
t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String())
|
||||
t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
t.Run("profile_json_output", func(t *testing.T) {
|
||||
// Json output
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
var jsonObject map[string][]map[string]interface{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
validProfiles := jsonObject["valid"]
|
||||
profileExists := false
|
||||
|
|
@ -587,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
}
|
||||
}
|
||||
if !profileExists {
|
||||
t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String())
|
||||
t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
|
||||
}
|
||||
|
||||
})
|
||||
|
|
@ -597,56 +598,56 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node"))
|
||||
if err != nil {
|
||||
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
|
||||
t.Logf("%q failed: %v (may not be an error).", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080"))
|
||||
if err != nil {
|
||||
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
|
||||
t.Logf("%q failed: %v (may not be an error)", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil {
|
||||
t.Fatalf("wait: %v", err)
|
||||
t.Fatalf("failed waiting for hello-node pod: %v", err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to do service list. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), "hello-node") {
|
||||
t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String())
|
||||
t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String())
|
||||
}
|
||||
|
||||
// Test --https --url mode
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to get service url. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Errorf("unexpected stderr output: %s", rr.Stderr)
|
||||
t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr)
|
||||
}
|
||||
|
||||
endpoint := strings.TrimSpace(rr.Stdout.String())
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse %q: %v", endpoint, err)
|
||||
t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err)
|
||||
}
|
||||
if u.Scheme != "https" {
|
||||
t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https")
|
||||
t.Errorf("expected scheme to be 'https' but got %q", u.Scheme)
|
||||
}
|
||||
|
||||
// Test --format=IP
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to get service url with custom format. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() {
|
||||
t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname())
|
||||
t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Command())
|
||||
}
|
||||
|
||||
// Test a regular URLminikube
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
endpoint = strings.TrimSpace(rr.Stdout.String())
|
||||
|
|
@ -655,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Fatalf("failed to parse %q: %v", endpoint, err)
|
||||
}
|
||||
if u.Scheme != "http" {
|
||||
t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http")
|
||||
t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme)
|
||||
}
|
||||
|
||||
t.Logf("url: %s", endpoint)
|
||||
|
|
@ -664,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Fatalf("get failed: %v\nresp: %v", err, resp)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK)
|
||||
t.Fatalf("expected status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -673,23 +674,23 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
// Table output
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to do addon list: args %q : %v", rr.Command(), err)
|
||||
}
|
||||
for _, a := range []string{"dashboard", "ingress", "ingress-dns"} {
|
||||
if !strings.Contains(rr.Output(), a) {
|
||||
t.Errorf("addon list expected to include %q but didn't output: %q", a, rr.Output())
|
||||
t.Errorf("expected 'addon list' output to include -%q- but got *%q*", a, rr.Output())
|
||||
}
|
||||
}
|
||||
|
||||
// Json output
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to do addon list with json output. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
var jsonObject map[string]interface{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to decode addon list json output : %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -701,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
want := "hello\n"
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello")))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to run an ssh command. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if rr.Stdout.String() != want {
|
||||
t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want)
|
||||
t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Command())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -712,12 +713,12 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateMySQL(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10))
|
||||
if err != nil {
|
||||
t.Fatalf("podwait: %v", err)
|
||||
t.Fatalf("failed waiting for mysql pod: %v", err)
|
||||
}
|
||||
|
||||
// Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule.
|
||||
|
|
@ -725,8 +726,8 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) {
|
|||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;"))
|
||||
return err
|
||||
}
|
||||
if err = retry.Expo(mysql, 2*time.Second, Seconds(180)); err != nil {
|
||||
t.Errorf("mysql failing: %v", err)
|
||||
if err = retry.Expo(mysql, 1*time.Second, Seconds(200)); err != nil {
|
||||
t.Errorf("failed to exec 'mysql -ppassword -e show databases;': %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -756,12 +757,12 @@ func setupFileSync(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Logf("local sync path: %s", p)
|
||||
err := copy.Copy("./testdata/sync.test", p)
|
||||
if err != nil {
|
||||
t.Fatalf("copy: %v", err)
|
||||
t.Fatalf("failed to copy ./testdata/sync.test : %v", err)
|
||||
}
|
||||
|
||||
err = copy.Copy("./testdata/minikube_test.pem", localTestCertPath())
|
||||
if err != nil {
|
||||
t.Fatalf("copy: %v", err)
|
||||
t.Fatalf("failed to copy ./testdata/minikube_test.pem : %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -775,14 +776,14 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Logf("Checking for existence of %s within VM", vp)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp)))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
got := rr.Stdout.String()
|
||||
t.Logf("file sync test content: %s", got)
|
||||
|
||||
expected, err := ioutil.ReadFile("./testdata/sync.test")
|
||||
if err != nil {
|
||||
t.Errorf("test file not found: %v", err)
|
||||
t.Errorf("failed to read test file '/testdata/sync.test' : %v", err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(string(expected), got); diff != "" {
|
||||
|
|
@ -812,13 +813,13 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) {
|
|||
t.Logf("Checking for existence of %s within VM", vp)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp)))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Command(), err)
|
||||
}
|
||||
|
||||
// Strip carriage returned by ssh
|
||||
got := strings.Replace(rr.Stdout.String(), "\r", "", -1)
|
||||
if diff := cmp.Diff(string(want), got); diff != "" {
|
||||
t.Errorf("minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff)
|
||||
t.Errorf("failed verify pem file. minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -827,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) {
|
|||
func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) {
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
want := []byte("IP was already correctly configured")
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
)
|
||||
|
||||
// TestGuestEnvironment verifies files and packges installed inside minikube ISO/Base image
|
||||
func TestGuestEnvironment(t *testing.T) {
|
||||
MaybeParallel(t)
|
||||
|
||||
|
|
@ -37,18 +38,18 @@ func TestGuestEnvironment(t *testing.T) {
|
|||
args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Run as a group so that our defer doesn't happen as tests are runnings
|
||||
t.Run("Binaries", func(t *testing.T) {
|
||||
for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService"} {
|
||||
for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService", "crictl", "podman", "docker"} {
|
||||
pkg := pkg
|
||||
t.Run(pkg, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg)))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to verify existence of %q binary : args %q: %v", pkg, rr.Command(), err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -67,9 +68,9 @@ func TestGuestEnvironment(t *testing.T) {
|
|||
mount := mount
|
||||
t.Run(mount, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount)))
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Targt(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount)))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to verify existence of %q mount. args %q: %v", mount, rr.Command(), err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,59 +50,59 @@ func TestGvisorAddon(t *testing.T) {
|
|||
startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// If it exists, include a locally built gvisor image
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", "gcr.io/k8s-minikube/gvisor-addon:2"))
|
||||
if err != nil {
|
||||
t.Logf("%s failed: %v (won't test local image)", rr.Args, err)
|
||||
t.Logf("%s failed: %v (won't test local image)", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "gvisor"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil {
|
||||
t.Fatalf("waiting for gvisor controller to be up: %v", err)
|
||||
t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err)
|
||||
}
|
||||
|
||||
// Create an untrusted workload
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
// Create gvisor workload
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
|
||||
t.Errorf("nginx: %v", err)
|
||||
t.Errorf("failed waiting for nginx pod: %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("nginx: %v", err)
|
||||
t.Errorf("failed waitinf for gvisor pod: %v", err)
|
||||
}
|
||||
|
||||
// Ensure that workloads survive a restart
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("faild stopping minikube. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Command(), err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("waiting for gvisor controller to be up: %v", err)
|
||||
t.Errorf("failed waiting for 'gvisor controller' pod : %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
|
||||
t.Errorf("nginx: %v", err)
|
||||
t.Errorf("failed waiting for 'nginx' pod : %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("nginx: %v", err)
|
||||
t.Errorf("failed waiting for 'gvisor' pod : %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -63,14 +63,24 @@ func (rr RunResult) Command() string {
|
|||
return sb.String()
|
||||
}
|
||||
|
||||
// indentLines indents every line in a bytes.Buffer and returns it as string
|
||||
func indentLines(b []byte) string {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(b))
|
||||
var lines string
|
||||
for scanner.Scan() {
|
||||
lines = lines + "\t" + scanner.Text() + "\n"
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
// Output returns human-readable output for an execution result
|
||||
func (rr RunResult) Output() string {
|
||||
var sb strings.Builder
|
||||
if rr.Stdout.Len() > 0 {
|
||||
sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes()))
|
||||
sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout.Bytes())))
|
||||
}
|
||||
if rr.Stderr.Len() > 0 {
|
||||
sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes()))
|
||||
sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr.Bytes())))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -46,22 +46,22 @@ func TestChangeNoneUser(t *testing.T) {
|
|||
startArgs := append([]string{"CHANGE_MINIKUBE_NONE_USER=true", Target(), "start", "--wait=false"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "status"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
username := os.Getenv("SUDO_USER")
|
||||
|
|
|
|||
|
|
@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) {
|
|||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if !strings.Contains(tc.name, "cni") {
|
||||
|
|
@ -101,43 +101,43 @@ func TestStartStop(t *testing.T) {
|
|||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// The none driver never really stops
|
||||
if !NoneDriver() {
|
||||
got := Status(ctx, t, Target(), profile, "Host")
|
||||
if got != state.Stopped.String() {
|
||||
t.Errorf("post-stop host status = %q; want = %q", got, state.Stopped)
|
||||
t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
|
||||
}
|
||||
}
|
||||
|
||||
// Enable an addon to assert it comes up afterwards
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
// Explicit fatal so that failures don't move directly to deletion
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if strings.Contains(tc.name, "cni") {
|
||||
t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
|
||||
} else {
|
||||
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)); err != nil {
|
||||
t.Fatalf("post-stop-start pod wait: %v", err)
|
||||
t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(4)); err != nil {
|
||||
t.Fatalf("post-stop-start addon wait: %v", err)
|
||||
t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
got := Status(ctx, t, Target(), profile, "Host")
|
||||
if got != state.Running.String() {
|
||||
t.Errorf("post-start host status = %q; want = %q", got, state.Running)
|
||||
t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
|
||||
}
|
||||
|
||||
if !NoneDriver() {
|
||||
|
|
@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) {
|
|||
// Normally handled by cleanuprofile, but not fatal there
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
|
||||
|
|
@ -158,7 +158,7 @@ func TestStartStop(t *testing.T) {
|
|||
t.Logf("config context error: %v (may be ok)", err)
|
||||
}
|
||||
if rr.ExitCode != 1 {
|
||||
t.Errorf("wanted exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
|
||||
t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
|
@ -182,14 +182,14 @@ func TestStartStopWithPreload(t *testing.T) {
|
|||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Now, pull the busybox image into the VMs docker daemon
|
||||
image := "busybox"
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Restart minikube with v1.17.3, which has a preloaded tarball
|
||||
|
|
@ -199,11 +199,11 @@ func TestStartStopWithPreload(t *testing.T) {
|
|||
startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion))
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
if !strings.Contains(rr.Output(), image) {
|
||||
t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output())
|
||||
|
|
@ -217,7 +217,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) {
|
|||
// schedule a pod to assert persistence
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// 8 minutes, because 4 is not enough for images to pull in all cases.
|
||||
|
|
@ -250,14 +250,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
|
|||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json"))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Command(), err)
|
||||
}
|
||||
jv := map[string][]struct {
|
||||
Tags []string `json:"repoTags"`
|
||||
}{}
|
||||
err = json.Unmarshal(rr.Stdout.Bytes(), &jv)
|
||||
if err != nil {
|
||||
t.Errorf("images unmarshal: %v", err)
|
||||
t.Errorf("failed to decode images json %v. output: %q", err, rr.Output())
|
||||
}
|
||||
found := map[string]bool{}
|
||||
for _, img := range jv["images"] {
|
||||
|
|
@ -274,7 +274,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
|
|||
}
|
||||
want, err := images.Kubeadm("", version)
|
||||
if err != nil {
|
||||
t.Errorf("kubeadm images: %v", version)
|
||||
t.Errorf("failed to get kubeadm images for %s : %v", version, err)
|
||||
}
|
||||
gotImages := []string{}
|
||||
for k := range found {
|
||||
|
|
@ -293,7 +293,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
got := Status(ctx, t, Target(), profile, "APIServer")
|
||||
|
|
@ -308,7 +308,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) {
|
|||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
got = Status(ctx, t, Target(), profile, "APIServer")
|
||||
|
|
|
|||
|
|
@ -82,22 +82,23 @@ func TestVersionUpgrade(t *testing.T) {
|
|||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Args, err)
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}"))
|
||||
if err != nil {
|
||||
t.Logf("status error: %v (may be ok)", err)
|
||||
}
|
||||
|
||||
got := strings.TrimSpace(rr.Stdout.String())
|
||||
if got != state.Stopped.String() {
|
||||
t.Errorf("status = %q; want = %q", got, state.Stopped.String())
|
||||
t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String())
|
||||
}
|
||||
|
||||
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json"))
|
||||
|
|
@ -119,14 +120,16 @@ func TestVersionUpgrade(t *testing.T) {
|
|||
t.Fatalf("expected server version %s is not the same with latest version %s", cv.ServerVersion.GitVersion, constants.NewestKubernetesVersion)
|
||||
}
|
||||
|
||||
t.Logf("Attempting to downgrade Kubernetes (should fail)")
|
||||
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
|
||||
if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil {
|
||||
t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args)
|
||||
t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command())
|
||||
}
|
||||
|
||||
t.Logf("Attempting restart after unsuccessful downgrade")
|
||||
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("%s failed: %v", rr.Args, err)
|
||||
t.Errorf("start after failed upgrade: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,16 +1,12 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.name}}\" profile does not exist": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "",
|
||||
"'none' driver does not support 'minikube docker-env' command": "",
|
||||
"'none' driver does not support 'minikube mount' command": "",
|
||||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
|
||||
|
|
@ -32,12 +28,11 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "",
|
||||
"Allow user prompts for more information": "",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Alternatives Bild-Repository zum Abrufen von Docker-Images. Dies ist hilfreich, wenn Sie nur eingeschränkten Zugriff auf gcr.io haben. Stellen Sie \\\"auto\\\" ein, dann wählt minikube eins für sie aus. Nutzer vom chinesischen Festland können einen lokalen gcr.io-Mirror wie registry.cn-hangzhou.aliyuncs.com/google_containers verwenden.",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Größe des der minikube-VM zugewiesenen Arbeitsspeichers (Format: \u003cNummer\u003e [\u003cEinheit\u003e], wobei Einheit = b, k, m oder g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
|
|
@ -48,6 +43,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "",
|
||||
"Cannot use both --output and --format options": "",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -66,9 +62,9 @@
|
|||
"Could not process error from failed deletion": "",
|
||||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Ländercode des zu verwendenden Image Mirror. Lassen Sie dieses Feld leer, um den globalen zu verwenden. Nutzer vom chinesischen Festland stellen cn ein.",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating mount {{.name}} ...": "Bereitstellung {{.name}} wird erstellt...",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "",
|
||||
"Default user id used for the mount": "",
|
||||
|
|
@ -97,10 +93,9 @@
|
|||
"Done! kubectl is now configured to use \"{{.name}}\"": "",
|
||||
"Done! kubectl is now configured to use \"{{.name}}__1": "Fertig! kubectl ist jetzt für die Verwendung von \"{{.name}}\" konfiguriert",
|
||||
"Download complete!": "Download abgeschlossen!",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "",
|
||||
"Downloading driver {{.driver}}:": "",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "",
|
||||
"ERROR creating `registry-creds-ecr` secret: {{.error}}": "",
|
||||
|
|
@ -109,7 +104,6 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "Experimentellen NVIDIA GPU-Support in minikube aktivieren",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "Host Resolver für NAT DNS-Anfragen aktivieren (nur Virtualbox-Treiber)",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "Proxy für NAT-DNS-Anforderungen aktivieren (nur Virtualbox-Treiber)",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Standard-CNI-Plugin-in (/etc/cni/net.d/k8s.conf) aktivieren. Wird in Verbindung mit \"--network-plugin = cni\" verwendet",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
|
|
@ -131,45 +125,29 @@
|
|||
"Error finding port for mount": "",
|
||||
"Error generating set output": "",
|
||||
"Error generating unset output": "",
|
||||
"Error getting IP": "",
|
||||
"Error getting client": "",
|
||||
"Error getting client: {{.error}}": "",
|
||||
"Error getting cluster": "",
|
||||
"Error getting cluster bootstrapper": "",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "",
|
||||
"Error getting machine logs": "",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "",
|
||||
"Error loading profile config": "",
|
||||
"Error loading profile config: {{.error}}": "",
|
||||
"Error loading profile {{.name}}: {{.error}}": "Fehler beim Laden des Profils {{.name}}: {{.error}}",
|
||||
"Error opening service": "",
|
||||
"Error parsing Driver version: {{.error}}": "Fehler beim Parsen der Driver-Version: {{.error}}",
|
||||
"Error parsing minikube version: {{.error}}": "Fehler beim Parsen der minikube-Version: {{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "",
|
||||
"Error retrieving node": "",
|
||||
"Error starting cluster": "",
|
||||
"Error starting mount": "",
|
||||
"Error starting node": "",
|
||||
"Error while setting kubectl current context : {{.error}}": "",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Fehler: Sie haben Kubernetes v{{.new}} ausgewählt, aber auf dem vorhandenen Cluster für Ihr Profil wird Kubernetes v{{.old}} ausgeführt. Zerstörungsfreie Downgrades werden nicht unterstützt. Sie können jedoch mit einer der folgenden Optionen fortfahren:\n* Erstellen Sie den Cluster mit Kubernetes v{{.new}} neu: Führen Sie \"minikube delete {{.profile}}\" und dann \"minikube start {{.profile}} - kubernetes-version = {{.new}}\" aus.\n* Erstellen Sie einen zweiten Cluster mit Kubernetes v{{.new}}: Führen Sie \"minikube start -p \u003cnew name\u003e --kubernetes-version = {{.new}}\" aus.\n* Verwenden Sie den vorhandenen Cluster mit Kubernetes v {{.old}} oder höher: Führen Sie \"minikube start {{.profile}} --kubernetes-version = {{.old}}\" aus.",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "Wird beendet",
|
||||
"Exiting.": "",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -177,38 +155,38 @@
|
|||
"Failed to cache ISO": "",
|
||||
"Failed to cache and load images": "",
|
||||
"Failed to cache binaries": "",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "",
|
||||
"Failed to cache kubectl": "",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Fehler beim Ändern der Berechtigungen für {{.minikube_dir_path}}: {{.error}}",
|
||||
"Failed to check if machine exists": "",
|
||||
"Failed to check main repository and mirrors for images for images": "",
|
||||
"Failed to delete cluster: {{.error}}": "Fehler beim Löschen des Clusters: {{.error}}",
|
||||
"Failed to delete cluster: {{.error}}__1": "Fehler beim Löschen des Clusters: {{.error}}",
|
||||
"Failed to delete images": "",
|
||||
"Failed to delete images from config": "",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "",
|
||||
"Failed to get image map": "",
|
||||
"Failed to get machine client": "",
|
||||
"Failed to get service URL: {{.error}}": "",
|
||||
"Failed to kill mount process: {{.error}}": "Fehler beim Beenden des Bereitstellungsprozesses: {{.error}}",
|
||||
"Failed to list cached images": "",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "",
|
||||
"Failed to save config": "",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY Env konnte nicht festgelegt werden. Benutzen Sie `export NO_PROXY = $ NO_PROXY, {{. Ip}}",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
"Failed to setup certs": "",
|
||||
"Failed to setup kubeconfig": "",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "",
|
||||
"Failed to update config": "",
|
||||
"Failed unmount: {{.error}}": "",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Für beste Ergebnisse installieren Sie kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
|
|
@ -218,13 +196,16 @@
|
|||
"Force minikube to perform possibly dangerous operations": "minikube zwingen, möglicherweise gefährliche Operationen durchzuführen",
|
||||
"Found network options:": "Gefundene Netzwerkoptionen:",
|
||||
"Found {{.number}} invalid profile(s) !": "",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "",
|
||||
"Gets the status of a local kubernetes cluster": "",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting machine config failed": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -235,6 +216,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -251,8 +233,9 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Unsichere Docker-Registrys, die an den Docker-Daemon übergeben werden. Der CIDR-Bereich des Standarddienstes wird automatisch hinzugefügt.",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -267,7 +250,7 @@
|
|||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Lokale Ordner, die über NFS-Bereitstellungen für Gast freigegeben werden (nur Hyperkit-Treiber)",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Speicherort des VPNKit-Sockets, der für das Netzwerk verwendet wird. Wenn leer, wird Hyperkit VPNKitSock deaktiviert. Wenn 'auto' die Docker for Mac VPNKit-Verbindung verwendet, wird andernfalls der angegebene VSock verwendet (nur Hyperkit-Treiber).",
|
||||
"Location of the minikube iso": "Speicherort der minikube-ISO",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -285,15 +268,18 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Keines der bekannten Repositories an Ihrem Standort ist zugänglich. {{.image_repository_name}} wird als Fallback verwendet.",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Keines der bekannten Repositories ist zugänglich. Erwägen Sie, ein alternatives Image-Repository mit der Kennzeichnung --image-repository anzugeben",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "Anzahl der CPUs, die der minikube-VM zugeordnet sind",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -314,6 +300,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Aktualisieren Sie '{{.driver_executable}}'. {{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -327,10 +314,10 @@
|
|||
"Profile gets or sets the current minikube profile": "",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Geben Sie die VM-UUID an, um die MAC-Adresse wiederherzustellen (nur Hyperkit-Treiber)",
|
||||
"Pulling base image ...": "",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "Registry-Mirror, die an den Docker-Daemon übergeben werden",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
|
|
@ -341,7 +328,10 @@
|
|||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "Die angeforderte Festplattengröße {{.requested_size}} liegt unter dem Mindestwert von {{.minimum_size}}.",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "Die angeforderte Speicherzuordnung ({{.memory}} MB) ist geringer als die Standardspeicherzuordnung von {{.default_memorysize}} MB. Beachten Sie, dass minikube möglicherweise nicht richtig funktioniert oder unerwartet abstürzt.",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "Die angeforderte Speicherzuweisung {{.requested_size}} liegt unter dem zulässigen Mindestwert von {{.minimum_size}}.",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "",
|
||||
"Retrieves the IP address of the running cluster": "",
|
||||
|
|
@ -354,8 +344,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -381,8 +373,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "Startet einen lokalen Kubernetes-Cluster",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -395,7 +387,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "",
|
||||
"Successfully deleted all profiles": "",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -427,12 +418,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "Der DNS-Domänenname des Clusters, der im Kubernetes-Cluster verwendet wird",
|
||||
"The container runtime to be used (docker, crio, containerd)": "Die zu verwendende Container-Laufzeit (Docker, Crio, Containerd)",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used": "Der zu verwendende Cri-Socket-Pfad",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "Der Treiber '{{.driver}}' wird auf {{.os}} nicht unterstützt",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Der Name des virtuellen Hyperv-Switch. Standardmäßig zuerst gefunden. (nur Hyperv-Treiber)",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -444,10 +439,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -456,19 +455,24 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "Der Treiber {{.driver_name}} sollte nicht mit Root-Rechten verwendet werden.",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Es gibt eine neue Version für '{{.driver_executable}}'. Bitte erwägen Sie ein Upgrade. {{.documentation_url}}",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Dies kann auch automatisch erfolgen, indem Sie die env var CHANGE_MINIKUBE_NONE_USER = true setzen",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "Dadurch wird der vorhandene Kubectl-Kontext beibehalten und ein minikube-Kontext erstellt.",
|
||||
"This will start the mount daemon and automatically mount files into minikube": "Dadurch wird der Mount-Daemon gestartet und die Dateien werden automatisch in minikube geladen",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Tipp: Um diesen Root-Cluster zu entfernen, führen Sie Folgendes aus: sudo {{.cmd}} delete",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}__1": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Möglicherweise müssen Sie Kubectl- oder minikube-Befehle verschieben, um sie als eigenen Nutzer zu verwenden. Um beispielsweise Ihre eigenen Einstellungen zu überschreiben, führen Sie aus:",
|
||||
|
|
@ -478,24 +482,31 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get bootstrapper: {{.error}}": "Bootstrapper kann nicht abgerufen werden: {{.error}}",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "Zwischengespeicherte Bilder können nicht aus der Konfigurationsdatei geladen werden.",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "Konfig kann nicht geladen werden: {{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "\"{{.Kubernetes_version}}\" kann nicht geparst werden: {{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to pull images, which may be OK: {{.error}}": "Bilder können nicht abgerufen werden, was möglicherweise kein Problem darstellt: {{.error}}",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to stop VM": "",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",
|
||||
|
|
@ -506,6 +517,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Upgrading from Kubernetes {{.old}} to {{.new}}": "Upgrade von Kubernetes {{.old}} auf {{.new}}",
|
||||
"Usage": "",
|
||||
|
|
@ -526,11 +539,11 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "Verwenden des Image-Repositorys {{.name}}",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM driver is one of: %v": "VM-Treiber ist einer von: %v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "",
|
||||
"Verifying dashboard health ...": "",
|
||||
|
|
@ -545,11 +558,12 @@
|
|||
"Wait failed": "",
|
||||
"Wait failed: {{.error}}": "",
|
||||
"Wait until Kubernetes core services are healthy before exiting": "Warten Sie vor dem Beenden, bis die Kerndienste von Kubernetes fehlerfrei arbeiten",
|
||||
"Waiting for cluster to come online ...": "",
|
||||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Als Root für die NFS-Freigaben wird standardmäßig /nfsshares verwendet (nur Hyperkit-Treiber)",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Sie scheinen einen Proxy zu verwenden, aber Ihre NO_PROXY-Umgebung enthält keine minikube-IP ({{.ip_address}}). Weitere Informationen finden Sie unter {{.documentation_url}}",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Möglicherweise müssen Sie die VM \"{{.name}}\" manuell von Ihrem Hypervisor entfernen",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "",
|
||||
|
|
@ -558,45 +572,43 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "Konfiguration von Kubectl und minikube wird in {{.home_folder}} gespeichert",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "",
|
||||
|
|
@ -605,14 +617,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -637,17 +651,17 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "",
|
||||
"{{.name}} has no available configuration options": "",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "",
|
||||
"{{.name}}\" profile does not exist": "Profil \"{{.name}}\" existiert nicht",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} auf {{.platform}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
|
|
@ -1,16 +1,13 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.name}}\" profile does not exist": "El perfil \"{{.name}}\" no existe",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "",
|
||||
"'none' driver does not support 'minikube docker-env' command": "",
|
||||
"'none' driver does not support 'minikube mount' command": "",
|
||||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
|
||||
|
|
@ -32,12 +29,11 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "",
|
||||
"Allow user prompts for more information": "",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Repositorio de imágenes alternativo del que extraer imágenes de Docker. Puedes usarlo cuando tengas acceso limitado a gcr.io. Si quieres que minikube elija uno por ti, solo tienes que definir el valor como \"auto\". Los usuarios de China continental pueden utilizar réplicas locales de gcr.io, como registry.cn-hangzhou.aliyuncs.com/google_containers",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Cantidad de RAM asignada a la VM de minikube (formato: \u003cnúmero\u003e[\u003cunidad\u003e], donde unidad = b, k, m o g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
|
|
@ -48,6 +44,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "",
|
||||
"Cannot use both --output and --format options": "",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -66,9 +63,9 @@
|
|||
"Could not process error from failed deletion": "",
|
||||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Código de país de la réplica de imagen que quieras utilizar. Déjalo en blanco para usar el valor global. Los usuarios de China continental deben definirlo como cn.",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating mount {{.name}} ...": "Creando la activación {{.name}}...",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "",
|
||||
"Default user id used for the mount": "",
|
||||
|
|
@ -97,10 +94,9 @@
|
|||
"Done! kubectl is now configured to use \"{{.name}}\"": "",
|
||||
"Done! kubectl is now configured to use \"{{.name}}__1": "¡Listo! Se ha configurado kubectl para que use \"{{.name}}",
|
||||
"Download complete!": "Se ha completado la descarga",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "",
|
||||
"Downloading driver {{.driver}}:": "",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "",
|
||||
"ERROR creating `registry-creds-ecr` secret: {{.error}}": "",
|
||||
|
|
@ -109,7 +105,6 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "Permite habilitar la compatibilidad experimental con GPUs NVIDIA en minikube",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "Permite habilitar la resolución del host en las solicitudes DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "Permite habilitar el uso de proxies en las solicitudes de DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Permite habilitar el complemento CNI predeterminado (/etc/cni/net.d/k8s.conf). Se utiliza junto con \"--network-plugin=cni",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
|
|
@ -131,45 +126,29 @@
|
|||
"Error finding port for mount": "",
|
||||
"Error generating set output": "",
|
||||
"Error generating unset output": "",
|
||||
"Error getting IP": "",
|
||||
"Error getting client": "",
|
||||
"Error getting client: {{.error}}": "",
|
||||
"Error getting cluster": "",
|
||||
"Error getting cluster bootstrapper": "",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "",
|
||||
"Error getting machine logs": "",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "",
|
||||
"Error loading profile config": "",
|
||||
"Error loading profile config: {{.error}}": "",
|
||||
"Error loading profile {{.name}}: {{.error}}": "No se ha podido cargar el perfil {{.name}}: {{.error}}",
|
||||
"Error opening service": "",
|
||||
"Error parsing Driver version: {{.error}}": "No se ha podido analizar la versión de Driver: {{.error}}",
|
||||
"Error parsing minikube version: {{.error}}": "No se ha podido analizar la versión de minikube: {{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "",
|
||||
"Error retrieving node": "",
|
||||
"Error starting cluster": "",
|
||||
"Error starting mount": "",
|
||||
"Error starting node": "",
|
||||
"Error while setting kubectl current context : {{.error}}": "",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Error: Has seleccionado Kubernetes {{.new}}, pero el clúster de tu perfil utiliza la versión {{.old}}. No se puede cambiar a una versión inferior sin eliminar todos los datos y recursos pertinentes, pero dispones de las siguientes opciones para continuar con la operación:\n* Volver a crear el clúster con Kubernetes {{.new}}: ejecuta \"minikube delete {{.profile}}\" y, luego, \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Crear un segundo clúster con Kubernetes {{.new}}: ejecuta \"minikube start -p \u003cnuevo nombre\u003e --kubernetes-version={{.new}}\"\n* Reutilizar el clúster actual con Kubernetes {{.old}} o una versión posterior: ejecuta \"minikube start {{.profile}} --kubernetes-version={{.old}}",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "Saliendo",
|
||||
"Exiting.": "",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -177,38 +156,38 @@
|
|||
"Failed to cache ISO": "",
|
||||
"Failed to cache and load images": "",
|
||||
"Failed to cache binaries": "",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "",
|
||||
"Failed to cache kubectl": "",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "No se han podido cambiar los permisos de {{.minikube_dir_path}}: {{.error}}",
|
||||
"Failed to check if machine exists": "",
|
||||
"Failed to check main repository and mirrors for images for images": "",
|
||||
"Failed to delete cluster: {{.error}}": "No se ha podido eliminar el clúster: {{.error}}",
|
||||
"Failed to delete cluster: {{.error}}__1": "No se ha podido eliminar el clúster: {{.error}}",
|
||||
"Failed to delete images": "",
|
||||
"Failed to delete images from config": "",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "",
|
||||
"Failed to get image map": "",
|
||||
"Failed to get machine client": "",
|
||||
"Failed to get service URL: {{.error}}": "",
|
||||
"Failed to kill mount process: {{.error}}": "No se ha podido detener el proceso de activación: {{.error}}",
|
||||
"Failed to list cached images": "",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "",
|
||||
"Failed to save config": "",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "No se ha podido definir la variable de entorno NO_PROXY. Utiliza export NO_PROXY=$NO_PROXY,{{.ip}}",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
"Failed to setup certs": "",
|
||||
"Failed to setup kubeconfig": "",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "",
|
||||
"Failed to update config": "",
|
||||
"Failed unmount: {{.error}}": "",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Para disfrutar de un funcionamiento óptimo, instala kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
|
|
@ -218,13 +197,16 @@
|
|||
"Force minikube to perform possibly dangerous operations": "Permite forzar minikube para que realice operaciones potencialmente peligrosas",
|
||||
"Found network options:": "Se han encontrado las siguientes opciones de red:",
|
||||
"Found {{.number}} invalid profile(s) !": "",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "",
|
||||
"Gets the status of a local kubernetes cluster": "",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting machine config failed": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -235,6 +217,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -251,8 +234,9 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registros de Docker que no son seguros y que se transferirán al daemon de Docker. Se añadirá automáticamente el intervalo CIDR de servicio predeterminado.",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -267,7 +251,7 @@
|
|||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Carpetas locales que se compartirán con el invitado mediante activaciones de NFS (solo con el controlador de hyperkit)",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Ubicación del socket de VPNKit que se utiliza para ofrecer funciones de red. Si se deja en blanco, se inhabilita VPNKitSock de Hyperkit; si se define como \"auto\", se utiliza Docker para las conexiones de VPNKit en Mac. Con cualquier otro valor, se utiliza el VSock especificado (solo con el controlador de hyperkit)",
|
||||
"Location of the minikube iso": "Ubicación de la ISO de minikube",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -285,15 +269,18 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "No se puede acceder a ninguno de los repositorios conocidos de tu ubicación. Se utilizará {{.image_repository_name}} como alternativa.",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "No se puede acceder a ninguno de los repositorios conocidos. Plantéate indicar un repositorio de imágenes alternativo con la marca --image-repository.",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "Número de CPU asignadas a la VM de minikube",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -314,6 +301,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Actualiza \"{{.driver_executable}}\". {{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -327,10 +315,10 @@
|
|||
"Profile gets or sets the current minikube profile": "",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Permite especificar un UUID de VM para restaurar la dirección MAC (solo con el controlador de hyperkit)",
|
||||
"Pulling base image ...": "",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "Réplicas del registro que se transferirán al daemon de Docker",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
|
|
@ -341,7 +329,10 @@
|
|||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "El tamaño de disco de {{.requested_size}} que se ha solicitado es inferior al tamaño mínimo de {{.minimum_size}}",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "El valor de la asignación de memoria ({{.memory}} MB) solicitada es inferior a la asignación de memoria predeterminada de {{.default_memorysize}} MB. minikube podría no funcionar correctamente o fallar de manera inesperada.",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "El valor de la asignación de memoria de {{.requested_size}} solicitada es inferior al valor mínimo de {{.minimum_size}}",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "",
|
||||
"Retrieves the IP address of the running cluster": "",
|
||||
|
|
@ -354,8 +345,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -381,8 +374,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "Inicia un clúster de Kubernetes local",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -395,7 +388,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "",
|
||||
"Successfully deleted all profiles": "",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -427,12 +419,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "El nombre de dominio de DNS del clúster de Kubernetes",
|
||||
"The container runtime to be used (docker, crio, containerd)": "El entorno de ejecución del contenedor (Docker, cri-o, containerd)",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used": "La ruta del socket de cri",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "El controlador \"{{.driver}}\" no se puede utilizar en {{.os}}",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "El nombre del conmutador virtual de hyperv. El valor predeterminado será el primer nombre que se encuentre (solo con el controlador de hyperv).",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -444,10 +440,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -456,19 +456,24 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "El controlador {{.driver_name}} no se debe utilizar con privilegios de raíz.",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Hay una nueva versión de \"{{.driver_executable}}\". Te recomendamos que realices la actualización. {{.documentation_url}}",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "El proceso se puede automatizar si se define la variable de entorno CHANGE_MINIKUBE_NONE_USER=true",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "Se conservará el contexto de kubectl actual y se creará uno de minikube.",
|
||||
"This will start the mount daemon and automatically mount files into minikube": "Se iniciará el daemon de activación y se activarán automáticamente los archivos en minikube",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Para eliminar este clúster de raíz, ejecuta: sudo {{.cmd}} delete",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "Para conectarte a este clúster, usa: kubectl --context={{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}__1": "Para conectarte a este clúster, usa: kubectl --context={{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Para usar comandos de kubectl o minikube como tu propio usuario, puede que debas reubicarlos. Por ejemplo, para sobrescribir tu configuración, ejecuta:",
|
||||
|
|
@ -478,24 +483,31 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get bootstrapper: {{.error}}": "No se ha podido obtener el programa previo: {{.error}}",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "No se han podido cargar las imágenes almacenadas en caché del archivo de configuración.",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "No se ha podido cargar la configuración: {{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "No se ha podido analizar la versión \"{{.kubernetes_version}}\": {{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to pull images, which may be OK: {{.error}}": "No se han podido extraer las imágenes, lo que podría no ser un problema: {{.error}}",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to stop VM": "",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",
|
||||
|
|
@ -506,6 +518,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Upgrading from Kubernetes {{.old}} to {{.new}}": "Actualizando la versión de Kubernetes de {{.old}} a {{.new}}",
|
||||
"Usage": "",
|
||||
|
|
@ -526,11 +540,11 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "Utilizando el repositorio de imágenes {{.name}}",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM driver is one of: %v": "El controlador de la VM es uno de los siguientes: %v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "",
|
||||
"Verifying dashboard health ...": "",
|
||||
|
|
@ -545,11 +559,12 @@
|
|||
"Wait failed": "",
|
||||
"Wait failed: {{.error}}": "",
|
||||
"Wait until Kubernetes core services are healthy before exiting": "Espera hasta que los servicios principales de Kubernetes se encuentren en buen estado antes de salir",
|
||||
"Waiting for cluster to come online ...": "",
|
||||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Ruta en la raíz de los recursos compartidos de NFS. Su valor predeterminado es /nfsshares (solo con el controlador de hyperkit)",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Parece que estás usando un proxy, pero tu entorno NO_PROXY no incluye la dirección IP de minikube ({{.ip_address}}). Consulta {{.documentation_url}} para obtener más información",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Puede que tengas que retirar manualmente la VM \"{{.name}}\" de tu hipervisor",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "",
|
||||
|
|
@ -558,45 +573,43 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "La configuración de kubectl y de minikube se almacenará en {{.home_folder}}",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "",
|
||||
|
|
@ -605,14 +618,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -637,16 +652,16 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "",
|
||||
"{{.name}} has no available configuration options": "",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} en {{.platform}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
|
|
@ -1,17 +1,12 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.name}}\" profile does not exist": "Le profil \"{{.name}}\" n'existe pas.",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "",
|
||||
"\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" est arrêté.",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "\"{{.machineName}}\" n'existe pas, rien à arrêter.",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "Le profil \"{{.name}}\" n'existe pas, tentative de suppression quand même.",
|
||||
"'none' driver does not support 'minikube docker-env' command": "",
|
||||
"'none' driver does not support 'minikube mount' command": "",
|
||||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
|
||||
|
|
@ -33,22 +28,22 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "",
|
||||
"Allow user prompts for more information": "",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Autre dépôt d'images d'où extraire des images Docker. Il peut être utilisé en cas d'accès limité à gcr.io. Définissez-le sur \\\"auto\\\" pour permettre à minikube de choisir la valeur à votre place. Pour les utilisateurs situés en Chine continentale, vous pouvez utiliser des miroirs gcr.io locaux tels que registry.cn-hangzhou.aliyuncs.com/google_containers.",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Quantité de mémoire RAM allouée à la VM minikube (format : \u003cnombre\u003e[\u003cunité\u003e], où \"unité\" = b, k, m ou g).",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
"Automatically selected the {{.driver}} driver": "",
|
||||
"Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "",
|
||||
"Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}",
|
||||
"Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternates}}",
|
||||
"Available Commands": "",
|
||||
"Basic Commands:": "",
|
||||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "",
|
||||
"Cannot use both --output and --format options": "",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -68,16 +63,15 @@
|
|||
"Could not process error from failed deletion": "",
|
||||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Code pays du miroir d'images à utiliser. Laissez ce paramètre vide pour utiliser le miroir international. Pour les utilisateurs situés en Chine continentale, définissez sa valeur sur \"cn\".",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating mount {{.name}} ...": "Création de l'installation {{.name}}…",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création d'une VM {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création de {{.machine_type}} {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "",
|
||||
"Default user id used for the mount": "",
|
||||
"Delete an image from the local cache.": "",
|
||||
"Deletes a local kubernetes cluster": "",
|
||||
"Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "",
|
||||
"Deletes a local kubernetes cluster. This command deletes the VM, and removes all associated files.": "Supprime le cluster Kubernetes local. Cette commande supprime la VM ainsi que tous les fichiers associés.",
|
||||
"Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "Supprime le cluster Kubernetes local. Cette commande supprime la VM ainsi que tous les fichiers associés.",
|
||||
"Deletes a node from a cluster.": "",
|
||||
"Deleting \"{{.profile_name}}\" in {{.driver_name}} ...": "Suppression de \"{{.profile_name}}\" dans {{.driver_name}}...",
|
||||
"Deleting node {{.name}} from cluster {{.cluster}}": "Suppression de noeuds {{.name}} de cluster {{.cluster}}",
|
||||
|
|
@ -90,18 +84,15 @@
|
|||
"Display dashboard URL instead of opening a browser": "",
|
||||
"Display the kubernetes addons URL in the CLI instead of opening it in the default browser": "",
|
||||
"Display the kubernetes service URL in the CLI instead of opening it in the default browser": "",
|
||||
"Display values currently set in the minikube config file": "",
|
||||
"Display values currently set in the minikube config file.": "",
|
||||
"Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "",
|
||||
"Docs have been saved at - {{.path}}": "",
|
||||
"Documentation: {{.url}}": "",
|
||||
"Done! kubectl is now configured to use \"{{.name}}": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".",
|
||||
"Done! kubectl is now configured to use \"{{.name}}\"": "",
|
||||
"Done! kubectl is now configured to use \"{{.name}}\"": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".",
|
||||
"Download complete!": "Téléchargement terminé !",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "",
|
||||
"Downloading driver {{.driver}}:": "",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "",
|
||||
"ERROR creating `registry-creds-ecr` secret: {{.error}}": "",
|
||||
|
|
@ -110,13 +101,12 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "Active l'assistance expérimentale du GPU NVIDIA dans minikube.",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "Active le résolveur d'hôte pour les requêtes DNS NAT (pilote VirtualBox uniquement).",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "Active le proxy pour les requêtes DNS NAT (pilote VirtualBox uniquement).",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Active le plug-in CNI par défaut (/etc/cni/net.d/k8s.conf). Utilisé en association avec \\\"--network-plugin=cni\\\".",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
"Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "",
|
||||
"Enabling '{{.name}}' returned an error: {{.error}}": "",
|
||||
"Enabling addons: {{.addons}}": "",
|
||||
"Enabling addons: {{.addons}}": "Installation des addons: {{.addons}}",
|
||||
"Enabling dashboard ...": "",
|
||||
"Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker": "",
|
||||
"Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. Alternatively, select another value for --driver": "",
|
||||
|
|
@ -132,45 +122,29 @@
|
|||
"Error finding port for mount": "",
|
||||
"Error generating set output": "",
|
||||
"Error generating unset output": "",
|
||||
"Error getting IP": "",
|
||||
"Error getting client": "",
|
||||
"Error getting client: {{.error}}": "",
|
||||
"Error getting cluster": "",
|
||||
"Error getting cluster bootstrapper": "",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "",
|
||||
"Error getting machine logs": "",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "",
|
||||
"Error loading profile config": "",
|
||||
"Error loading profile config: {{.error}}": "",
|
||||
"Error loading profile {{.name}}: {{.error}}": "Erreur lors du chargement du profil {{.name}} : {{.error}}",
|
||||
"Error opening service": "",
|
||||
"Error parsing Driver version: {{.error}}": "Erreur lors de l'analyse de la version du pilote de la VM : {{.error}}",
|
||||
"Error parsing minikube version: {{.error}}": "Erreur lors de l'analyse de la version de minikube : {{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "",
|
||||
"Error retrieving node": "",
|
||||
"Error starting cluster": "",
|
||||
"Error starting mount": "",
|
||||
"Error starting node": "",
|
||||
"Error while setting kubectl current context : {{.error}}": "",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "Fermeture…",
|
||||
"Exiting.": "",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -178,38 +152,38 @@
|
|||
"Failed to cache ISO": "",
|
||||
"Failed to cache and load images": "",
|
||||
"Failed to cache binaries": "",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "",
|
||||
"Failed to cache kubectl": "",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Échec de la modification des autorisations pour {{.minikube_dir_path}} : {{.error}}",
|
||||
"Failed to check if machine exists": "",
|
||||
"Failed to check main repository and mirrors for images for images": "",
|
||||
"Failed to delete cluster: {{.error}}": "Échec de la suppression du cluster : {{.error}}",
|
||||
"Failed to delete cluster: {{.error}}__1": "Échec de la suppression du cluster : {{.error}}",
|
||||
"Failed to delete images": "",
|
||||
"Failed to delete images from config": "",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "",
|
||||
"Failed to get image map": "",
|
||||
"Failed to get machine client": "",
|
||||
"Failed to get service URL: {{.error}}": "",
|
||||
"Failed to kill mount process: {{.error}}": "Échec de l'arrêt du processus d'installation : {{.error}}",
|
||||
"Failed to list cached images": "",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "",
|
||||
"Failed to save config": "",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "Échec de la définition de NO_PROXY Env. Veuillez utiliser `export NO_PROXY=$NO_PROXY,{{.ip}}.",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
"Failed to setup certs": "",
|
||||
"Failed to setup kubeconfig": "",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "",
|
||||
"Failed to update config": "",
|
||||
"Failed unmount: {{.error}}": "",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Pour des résultats optimaux, installez kubectl à l'adresse suivante : https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
|
|
@ -219,13 +193,16 @@
|
|||
"Force minikube to perform possibly dangerous operations": "Oblige minikube à réaliser des opérations possiblement dangereuses.",
|
||||
"Found network options:": "Options de réseau trouvées :",
|
||||
"Found {{.number}} invalid profile(s) !": "",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "",
|
||||
"Gets the status of a local kubernetes cluster": "",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting machine config failed": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -236,6 +213,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -252,8 +230,9 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registres Docker non sécurisés à transmettre au daemon Docker. La plage CIDR par défaut du service sera ajoutée automatiquement.",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -268,7 +247,7 @@
|
|||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Dossiers locaux à partager avec l'invité par des installations NFS (pilote hyperkit uniquement).",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Emplacement du socket VPNKit exploité pour la mise en réseau. Si la valeur est vide, désactive Hyperkit VPNKitSock. Si la valeur affiche \"auto\", utilise la connexion VPNKit de Docker pour Mac. Sinon, utilise le VSock spécifié (pilote hyperkit uniquement).",
|
||||
"Location of the minikube iso": "Emplacement de l'ISO minikube.",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -286,15 +265,18 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "Le noeud \"{{.node_name}}\" est arrêté.",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Aucun dépôt connu dans votre emplacement n'est accessible. {{.image_repository_name}} est utilisé comme dépôt de remplacement.",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Aucun dépôt connu n'est accessible. Pensez à spécifier un autre dépôt d'images à l'aide de l'indicateur \"--image-repository\".",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "Nombre de processeurs alloués à la VM minikube.",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -315,6 +297,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Veuillez mettre à niveau l'exécutable \"{{.driver_executable}}\". {{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -328,22 +311,25 @@
|
|||
"Profile gets or sets the current minikube profile": "",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Fournit l'identifiant unique universel (UUID) de la VM pour restaurer l'adresse MAC (pilote hyperkit uniquement).",
|
||||
"Pulling base image ...": "",
|
||||
"Pulling images ...": "Extraction des images... ",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "Miroirs de dépôt à transmettre au daemon Docker.",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
"Related issues:": "",
|
||||
"Relaunching Kubernetes using {{.bootstrapper}} ...": "Redémarrage de Kubernetes à l'aide de {{.bootstrapper}}…",
|
||||
"Removed all traces of the \"{{.name}}\" cluster.": "",
|
||||
"Removed all traces of the \"{{.name}}\" cluster.": "Le cluster \"{{.name}}\" a été supprimé.",
|
||||
"Removing {{.directory}} ...": "Suppression du répertoire {{.directory}}…",
|
||||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "La taille de disque demandée ({{.requested_size}}) est inférieure à la taille minimale ({{.minimum_size}}).",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "L'allocation de mémoire demandée ({{.memory}} Mo) est inférieure à l'allocation de mémoire par défaut ({{.default_memorysize}} Mo). Sachez que minikube pourrait ne pas fonctionner correctement ou planter de manière inattendue.",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "L'allocation de mémoire demandée ({{.requested_size}}) est inférieure au minimum autorisé ({{.minimum_size}}).",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "",
|
||||
"Retrieves the IP address of the running cluster": "",
|
||||
|
|
@ -356,8 +342,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -383,8 +371,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "Démarre un cluster Kubernetes local.",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -397,7 +385,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "",
|
||||
"Successfully deleted all profiles": "",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -405,7 +392,6 @@
|
|||
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "",
|
||||
"The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Le pilote \"{{.driver_name}}\" nécessite de disposer de droits racine. Veuillez exécuter minikube à l'aide de \"sudo minikube --vm-driver={{.driver_name}}\".",
|
||||
"The \"{{.driver_name}}\" driver should not be used with root privileges.": "",
|
||||
"The \"{{.name}}\" cluster has been deleted.": "Le cluster \"{{.name}}\" a été supprimé.",
|
||||
"The 'none' driver provides limited isolation and may reduce system security and reliability.": "L'isolation fournie par le pilote \"none\" (aucun) est limitée, ce qui peut diminuer la sécurité et la fiabilité du système.",
|
||||
"The '{{.addonName}}' addon is enabled": "",
|
||||
"The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "",
|
||||
|
|
@ -428,12 +414,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "Nom du domaine DNS du cluster utilisé dans le cluster Kubernetes.",
|
||||
"The container runtime to be used (docker, crio, containerd)": "Environnement d'exécution du conteneur à utiliser (docker, crio, containerd).",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used": "Chemin d'accès au socket CRI à utiliser.",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "Le pilote \"{{.driver}}\" n'est pas compatible avec {{.os}}.",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Nom du commutateur virtuel hyperv. La valeur par défaut affiche le premier commutateur trouvé (pilote hyperv uniquement).",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -445,10 +435,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -457,19 +451,24 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "Le pilote {{.driver_name}} ne doit pas être utilisé avec des droits racine.",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Une nouvelle version de \"{{.driver_executable}}\" est disponible. Pensez à effectuer la mise à niveau. {{.documentation_url}}",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Cette opération peut également être réalisée en définissant la variable d'environnement \"CHANGE_MINIKUBE_NONE_USER=true\".",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "Cela permet de conserver le contexte kubectl existant et de créer un contexte minikube.",
|
||||
"This will start the mount daemon and automatically mount files into minikube": "Cela permet de lancer le daemon d'installation et d'installer automatiquement les fichiers dans minikube.",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Conseil : Pour supprimer ce cluster appartenant à la racine, exécutez la commande \"sudo {{.cmd}} delete\".",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}__1": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Pour utiliser les commandes kubectl ou minikube sous votre propre nom d'utilisateur, vous devrez peut-être les déplacer. Par exemple, pour écraser vos propres paramètres, exécutez la commande suivante :",
|
||||
|
|
@ -479,24 +478,31 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get bootstrapper: {{.error}}": "Impossible d'obtenir l'amorceur : {{.error}}",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "Impossible de charger les images mises en cache depuis le fichier de configuration.",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "Impossible de charger la configuration : {{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "Impossible d'analyser la version \"{{.kubernetes_version}}\" : {{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to pull images, which may be OK: {{.error}}": "Impossible d'extraire les images, ce qui n'est pas forcément un problème : {{.error}}",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to stop VM": "",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",
|
||||
|
|
@ -507,6 +513,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Upgrading from Kubernetes {{.old}} to {{.new}}": "Mise à niveau de Kubernetes de la version {{.old}} à la version {{.new}}…",
|
||||
"Usage": "Usage",
|
||||
|
|
@ -527,11 +535,11 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "Utilisation du dépôt d'images {{.name}}…",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM driver is one of: %v": "Le pilote de la VM est l'un des suivants : %v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "",
|
||||
"Verifying dashboard health ...": "",
|
||||
|
|
@ -548,12 +556,13 @@
|
|||
"Wait failed: {{.error}}": "",
|
||||
"Wait until Kubernetes core services are healthy before exiting": "Avant de quitter, veuillez patienter jusqu'à ce que les principaux services Kubernetes soient opérationnels.",
|
||||
"Waiting for SSH access ...": "En attente de l'accès SSH...",
|
||||
"Waiting for cluster to come online ...": "",
|
||||
"Waiting for:": "En attente de :",
|
||||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Emplacement permettant d'accéder aux partages NFS en mode root, la valeur par défaut affichant /nfsshares (pilote hyperkit uniquement).",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Il semble que vous utilisiez un proxy, mais votre environnement NO_PROXY n'inclut pas l'adresse IP ({{.ip_address}}) de minikube. Consultez la documentation à l'adresse {{.documentation_url}} pour en savoir plus.",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Vous devrez peut-être supprimer la VM \"{{.name}}\" manuellement de votre hyperviseur.",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "",
|
||||
|
|
@ -562,45 +571,43 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "Les configurations kubectl et minikube seront stockées dans le dossier {{.home_folder}}.",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "",
|
||||
|
|
@ -609,14 +616,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -641,16 +650,16 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "",
|
||||
"{{.name}} has no available configuration options": "",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} sur {{.platform}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
|
|
@ -1,12 +1,11 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.minikube_addon}}\" was successfully disabled": "「{{.minikube_addon}}」が無効化されました",
|
||||
"\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "「{{.name}}」というクラスターは存在しません。クリーンアップ処理を続行します。",
|
||||
"\"{{.name}}\" profile does not exist": "「{{.name}}」というプロファイルは存在しません",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" VM does not exist, nothing to stop": "「{{.profile_name}}」というVMは存在しません。停止すべき対象がありません",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "「{{.profile_name}}」というホストは存在しません。IPを表示できません",
|
||||
"\"{{.profile_name}}\" stopped.": "「{{.profile_name}}」が停止しました。",
|
||||
"'none' driver does not support 'minikube docker-env' command": "「none」ドライバーは「minikube docker-env」コマンドをサポートしていません",
|
||||
|
|
@ -14,7 +13,6 @@
|
|||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "「none」ドライバーは「minikube ssh」コマンドをサポートしていません",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "「{{.driver}}」ドライバーがエラーを報告しました: {{.error}}",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
|
||||
|
|
@ -36,12 +34,11 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "",
|
||||
"Allow user prompts for more information": "",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Docker イメージの pull 元の代替イメージ リポジトリ。これは、gcr.io へのアクセスが制限されている場合に使用できます。これを \\\"auto\\\" に設定すると、minikube によって自動的に指定されるようになります。中国本土のユーザーの場合、registry.cn-hangzhou.aliyuncs.com/google_containers などのローカル gcr.io ミラーを使用できます。",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "minikube VM に割り当てられた RAM 容量(形式: \u003cnumber\u003e[\u003cunit\u003e]、unit = b、k、m、g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
|
|
@ -52,6 +49,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "",
|
||||
"Cannot use both --output and --format options": "",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -70,9 +68,9 @@
|
|||
"Could not process error from failed deletion": "",
|
||||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "使用するイメージミラーの国コード。グローバルのものを使用する場合は空のままにします。中国本土のユーザーの場合は、「cn」に設定します。",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating mount {{.name}} ...": "マウント {{.name}} を作成しています...",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "",
|
||||
"Default user id used for the mount": "",
|
||||
|
|
@ -101,10 +99,9 @@
|
|||
"Done! kubectl is now configured to use \"{{.name}}\"": "",
|
||||
"Done! kubectl is now configured to use \"{{.name}}__1": "完了しました。kubectl が「{{.name}}」を使用するよう構成されました",
|
||||
"Download complete!": "ダウンロードが完了しました。",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "",
|
||||
"Downloading driver {{.driver}}:": "",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "",
|
||||
"ERROR creating `registry-creds-ecr` secret: {{.error}}": "",
|
||||
|
|
@ -113,7 +110,6 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "minikube での試験運用版 NVIDIA GPU の対応を有効にします",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のホストリゾルバを有効にします(virtualbox ドライバのみ)",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のプロキシを有効にします(virtualbox ドライバのみ)",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "デフォルトの CNI プラグイン(/etc/cni/net.d/k8s.conf)を有効にします。\\\"--network-plugin=cni\\\" と組み合わせて使用されます。",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
|
|
@ -135,45 +131,29 @@
|
|||
"Error finding port for mount": "",
|
||||
"Error generating set output": "",
|
||||
"Error generating unset output": "",
|
||||
"Error getting IP": "",
|
||||
"Error getting client": "",
|
||||
"Error getting client: {{.error}}": "",
|
||||
"Error getting cluster": "",
|
||||
"Error getting cluster bootstrapper": "",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "",
|
||||
"Error getting machine logs": "",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "",
|
||||
"Error loading profile config": "",
|
||||
"Error loading profile config: {{.error}}": "",
|
||||
"Error loading profile {{.name}}: {{.error}}": "プロファイル {{.name}} の読み込み中にエラーが発生しました。{{.error}}",
|
||||
"Error opening service": "",
|
||||
"Error parsing Driver version: {{.error}}": "Driver バージョンの解析中にエラーが発生しました。{{.error}}",
|
||||
"Error parsing minikube version: {{.error}}": "minikube バージョンの解析中にエラーが発生しました。{{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "",
|
||||
"Error retrieving node": "",
|
||||
"Error starting cluster": "",
|
||||
"Error starting mount": "",
|
||||
"Error starting node": "",
|
||||
"Error while setting kubectl current context : {{.error}}": "",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "エラー: Kubernetes v{{.new}} が選択されましたが、使用しているプロファイルの既存クラスタで実行されているのは Kubernetes v{{.old}} です。非破壊的なダウングレードはサポートされていませんが、以下のいずれかの方法で続行できます。\n* Kubernetes v{{.new}} を使用してクラスタを再作成する: 「minikube delete {{.profile}}」を実行してから、「minikube start {{.profile}} --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.new}} を使用して 2 つ目のクラスタを作成する: 「minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.old}} 以降を使用して既存のクラスタを再利用する: 「minikube start {{.profile}} --kubernetes-version={{.old}}」を実行します。",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "終了しています",
|
||||
"Exiting.": "終了しています。",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -181,38 +161,38 @@
|
|||
"Failed to cache ISO": "",
|
||||
"Failed to cache and load images": "",
|
||||
"Failed to cache binaries": "",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "",
|
||||
"Failed to cache kubectl": "",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} に対する権限を変更できませんでした。{{.error}}",
|
||||
"Failed to check if machine exists": "",
|
||||
"Failed to check main repository and mirrors for images for images": "",
|
||||
"Failed to delete cluster: {{.error}}": "クラスタを削除できませんでした。{{.error}}",
|
||||
"Failed to delete cluster: {{.error}}__1": "クラスタを削除できませんでした。{{.error}}",
|
||||
"Failed to delete images": "",
|
||||
"Failed to delete images from config": "",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "",
|
||||
"Failed to get image map": "",
|
||||
"Failed to get machine client": "",
|
||||
"Failed to get service URL: {{.error}}": "",
|
||||
"Failed to kill mount process: {{.error}}": "マウント プロセスを強制終了できませんでした。{{.error}}",
|
||||
"Failed to list cached images": "",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "",
|
||||
"Failed to save config": "",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY 環境変数を設定できませんでした。「export NO_PROXY=$NO_PROXY,{{.ip}}」を使用してください。",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
"Failed to setup certs": "",
|
||||
"Failed to setup kubeconfig": "",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "",
|
||||
"Failed to update config": "",
|
||||
"Failed unmount: {{.error}}": "",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "最適な結果を得るには、kubectl を次のサイト https://kubernetes.io/docs/tasks/tools/install-kubectl/ からインストールしてください",
|
||||
|
|
@ -222,13 +202,16 @@
|
|||
"Force minikube to perform possibly dangerous operations": "minikube で危険な可能性のある操作を強制的に実行します",
|
||||
"Found network options:": "ネットワーク オプションが見つかりました。",
|
||||
"Found {{.number}} invalid profile(s) !": "",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "",
|
||||
"Gets the status of a local kubernetes cluster": "",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting machine config failed": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -239,6 +222,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -255,8 +239,9 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Docker デーモンに渡す Docker レジストリが安全ではありません。デフォルトのサービス CIDR 範囲が自動的に追加されます。",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -271,7 +256,7 @@
|
|||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "NFS マウントを介してゲストと共有するローカル フォルダ(hyperkit ドライバのみ)",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "ネットワーキングに使用する VPNKit ソケットのロケーション。空の場合、Hyperkit VPNKitSock が無効になり、「auto」の場合、Mac VPNKit 接続に Docker が使用され、それ以外の場合、指定された VSock が使用されます(hyperkit ドライバのみ)",
|
||||
"Location of the minikube iso": "minikube iso のロケーション",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -289,15 +274,18 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "使用しているロケーション内で既知のいずれのリポジトリにもアクセスできません。フォールバックとして {{.image_repository_name}} を使用します。",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "既知のいずれのリポジトリにもアクセスできません。--image-repository フラグとともに代替のイメージ リポジトリを指定することを検討してください。",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "minikube VM に割り当てられた CPU の数",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -318,6 +306,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "「{{.driver_executable}}」をアップグレードしてください。{{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -331,10 +320,10 @@
|
|||
"Profile gets or sets the current minikube profile": "",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "MAC アドレスを復元するための VM UUID を指定します(hyperkit ドライバのみ)",
|
||||
"Pulling base image ...": "",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "Docker デーモンに渡すレジストリ ミラー",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
|
|
@ -345,7 +334,10 @@
|
|||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "リクエストされたディスクサイズ {{.requested_size}} が最小値 {{.minimum_size}} 未満です",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "リクエストされたメモリ割り当て({{.memory}} MB)がデフォルトのメモリ割り当て {{.default_memorysize}} MB 未満です。minikube が正常に動作しないか、予期せずクラッシュする可能性があることに注意してください。",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "リクエストされたメモリ割り当て {{.requested_size}} が許可される最小値 {{.minimum_size}} 未満です",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "",
|
||||
"Retrieves the IP address of the running cluster": "",
|
||||
|
|
@ -358,8 +350,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -385,8 +379,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "ローカルの Kubernetes クラスタを起動します",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -399,7 +393,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "",
|
||||
"Successfully deleted all profiles": "",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -431,12 +424,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "Kubernetes クラスタで使用されるクラスタ DNS ドメイン名",
|
||||
"The container runtime to be used (docker, crio, containerd)": "使用されるコンテナ ランタイム(docker、crio、containerd)",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used": "使用される CRI ソケットパス",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "ドライバ「{{.driver}}」は、{{.os}} ではサポートされていません",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 仮想スイッチ名。最初に見つかったものにデフォルト設定されます(hyperv ドライバのみ)",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -448,10 +445,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -460,19 +461,24 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} ドライバをルート権限で使用しないでください。",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "「{{.driver_executable}}」の新しいバージョンがあります。アップグレードを検討してください。{{.documentation_url}}",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "これは環境変数 CHANGE_MINIKUBE_NONE_USER=true を設定して自動的に行うこともできます",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "これにより既存の kubectl コンテキストが保持され、minikube コンテキストが作成されます。",
|
||||
"This will start the mount daemon and automatically mount files into minikube": "これによりマウント デーモンが起動し、ファイルが minikube に自動的にマウントされます",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "ヒント: この root 所有のクラスタを削除するには、「sudo {{.cmd}} delete」を実行します",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}__1": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "kubectl か minikube コマンドを独自のユーザーとして使用するには、そのコマンドの再配置が必要な場合があります。たとえば、独自の設定を上書きするには、以下を実行します。",
|
||||
|
|
@ -482,24 +488,31 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get bootstrapper: {{.error}}": "ブートストラッパを取得できません。{{.error}}",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "キャッシュに保存されているイメージを構成ファイルから読み込むことができません。",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "構成を読み込むことができません。{{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "「{{.kubernetes_version}}」を解析できません。{{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to pull images, which may be OK: {{.error}}": "イメージを pull できませんが、問題ありません。{{.error}}",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to stop VM": "",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",
|
||||
|
|
@ -510,6 +523,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Upgrading from Kubernetes {{.old}} to {{.new}}": "Kubernetes を {{.old}} から {{.new}} にアップグレードしています",
|
||||
"Usage": "",
|
||||
|
|
@ -530,11 +545,11 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "イメージ リポジトリ {{.name}} を使用しています",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM driver is one of: %v": "VM ドライバは次のいずれかです。%v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "",
|
||||
"Verifying dashboard health ...": "",
|
||||
|
|
@ -549,11 +564,12 @@
|
|||
"Wait failed": "",
|
||||
"Wait failed: {{.error}}": "",
|
||||
"Wait until Kubernetes core services are healthy before exiting": "Kubernetes コアサービスが正常になるまで待機してから終了してください",
|
||||
"Waiting for cluster to come online ...": "",
|
||||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共有のルートに指定する場所。デフォルトは /nfsshares(hyperkit ドライバのみ)",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "プロキシを使用しようとしていますが、現在の NO_PROXY 環境に minikube IP({{.ip_address}})は含まれていません。詳細については、{{.documentation_url}} をご覧ください",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "ハイパーバイザから「{{.name}}」VM を手動で削除することが必要な可能性があります",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "",
|
||||
|
|
@ -562,45 +578,43 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl と minikube の構成は {{.home_folder}} に保存されます",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "",
|
||||
|
|
@ -609,14 +623,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -641,16 +657,16 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "",
|
||||
"{{.name}} has no available configuration options": "",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上の {{.prefix}}minikube {{.version}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "\"The '{{.minikube_addon}}' 이 비활성화되었습니다",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.name}}\" profile does not exist": "\"{{.name}}\" 프로필이 존재하지 않습니다",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "\"{{.name}}\" 프로필이 존재하지 않습니다, 그럼에도 불구하고 시도합니다",
|
||||
"\"{{.node_name}}\" stopped.": "\"{{.node_name}}\" 이 중단되었습니다",
|
||||
|
|
@ -29,11 +30,11 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "노드 하나를 주어진 클러스터 컨피그에 추가하고 시작합니다",
|
||||
"Adds a node to the given cluster.": "노드 하나를 주어진 클러스터에 추가합니다",
|
||||
"Advanced Commands:": "고급 커맨드:",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "",
|
||||
"Allow user prompts for more information": "많은 정보를 위해 사용자 프롬프트를 허가합니다",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: <number>[<unit>], where unit = b, k, m or g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "",
|
||||
"Amount of time to wait for service in seconds": "",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "VirtualBox 와 같은 또 다른 하이퍼바이저가 KVM 과 충돌이 발생합니다. 다른 하이퍼바이저를 중단하거나 --driver 로 변경하세요",
|
||||
|
|
@ -44,6 +45,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "apiserver 가 API 요청을 서비스할 때까지 막습니다",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "마운트하기 위한 디렉토리 {{.path}} 를 찾을 수 없습니다",
|
||||
"Cannot use both --output and --format options": "--output 과 --format 옵션을 함께 사용할 수 없습니다",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -62,9 +64,10 @@
|
|||
"Could not process error from failed deletion": "",
|
||||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating mount {{.name}} ...": "",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 를 생성하는 중 ...",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "DEPRECATED 되었습니다, 'driver' 를 사용하세요",
|
||||
"Default group id used for the mount": "마운트를 위한 디폴트 group id",
|
||||
"Default user id used for the mount": "마운트를 위한 디폴트 user id",
|
||||
|
|
@ -89,9 +92,9 @@
|
|||
"Documentation: {{.url}}": "문서: {{.url}}",
|
||||
"Done! kubectl is now configured to use \"{{.name}}\"": "끝났습니다! 이제 kubectl 이 \"{{.name}}\" 를 사용할 수 있도록 설정되었습니다",
|
||||
"Download complete!": "다운로드가 성공하였습니다!",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "가상 머신 부트 이미지 다운로드 중 ...",
|
||||
"Downloading driver {{.driver}}:": "드라이버 {{.driver}} 다운로드 중 :",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "{{.name}} {{.version}} 다운로드 중",
|
||||
"ERROR creating `registry-creds-acr` secret": "registry-creds-acr` secret 생성 오류",
|
||||
"ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` secret 생성 오류",
|
||||
|
|
@ -101,7 +104,6 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
"Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "",
|
||||
|
|
@ -128,19 +130,16 @@
|
|||
"Error getting cluster bootstrapper": "클러스터 부트스트래퍼 조회 오류",
|
||||
"Error getting cluster config": "클러스터 컨피그 조회 오류",
|
||||
"Error getting config": "컨피그 조회 오류",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "호스트 조회 오류",
|
||||
"Error getting host IP": "호스트 IP 조회 오류",
|
||||
"Error getting host status": "호스트 상태 조회 오류",
|
||||
"Error getting machine logs": "머신 로그 조회 오류",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "서비스 상태 조회 오류",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "ssh 클라이언트 조회 오류",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "api 로딩 오류",
|
||||
"Error loading profile config": "프로필 컨피그 로딩 오류",
|
||||
|
|
@ -148,15 +147,14 @@
|
|||
"Error opening service": "",
|
||||
"Error parsing minikube version: {{.error}}": "minikube 버전 파싱 오류: {{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "",
|
||||
"Error retrieving node": "",
|
||||
"Error starting cluster": "클러스터 시작 오류",
|
||||
"Error starting mount": "마운트 시작 오류",
|
||||
"Error starting node": "노드 시작 오류",
|
||||
"Error while setting kubectl current context : {{.error}}": "kubectl current context 설정 오류 : {{.error}}",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "예시",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "",
|
||||
"Exiting.": "",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -164,6 +162,7 @@
|
|||
"Failed to cache ISO": "ISO 캐싱에 실패하였습니다",
|
||||
"Failed to cache and load images": "이미지 캐싱 및 로딩에 실패하였습니다",
|
||||
"Failed to cache binaries": "바이너리 캐싱에 실패하였습니다",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "이미지를 tar 로 캐싱하는 데 실패하였습니다",
|
||||
"Failed to cache kubectl": "kubectl 캐싱에 실패하였습니다",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} 의 권한 변경에 실패하였습니다: {{.error}}",
|
||||
|
|
@ -175,6 +174,7 @@
|
|||
"Failed to delete node {{.name}}": "노드 {{.name}} 제거에 실패하였습니다",
|
||||
"Failed to enable container runtime": "컨테이너 런타임 활성화에 실패하였습니다",
|
||||
"Failed to generate config": "컨피그 생성에 실패하였습니다",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "부트스트래퍼 조회에 실패하였습니다",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "드라이버 URL 조회에 실패하였습니다",
|
||||
|
|
@ -183,6 +183,7 @@
|
|||
"Failed to get service URL: {{.error}}": "서비스 URL 조회에 실패하였습니다: {{.error}}",
|
||||
"Failed to kill mount process: {{.error}}": "마운트 프로세스 중지에 실패하였습니다: {{.error}}",
|
||||
"Failed to list cached images": "캐시된 이미지를 조회하는 데 실패하였습니다",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "캐시된 이미지를 다시 불러오는 데 실패하였습니다",
|
||||
"Failed to save config": "컨피그 저장에 실패하였습니다",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
|
|
@ -194,6 +195,7 @@
|
|||
"Failed to update config": "컨피그를 수정하는 데 실패하였습니다",
|
||||
"Failed unmount: {{.error}}": "마운트 해제에 실패하였습니다: {{.error}}",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
|
|
@ -202,13 +204,17 @@
|
|||
"Force minikube to perform possibly dangerous operations": "",
|
||||
"Found network options:": "네트워크 옵션을 찾았습니다",
|
||||
"Found {{.number}} invalid profile(s) !": "{{.number}} 개의 무효한 프로필을 찾았습니다",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "",
|
||||
"Gets the status of a local kubernetes cluster": "",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting machine config failed": "머신 컨피그 조회 실패",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -219,6 +225,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -233,8 +240,9 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -248,7 +256,7 @@
|
|||
"Lists the URLs for the services in your local cluster": "",
|
||||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -266,14 +274,17 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -294,6 +305,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
"Powering off \"{{.profile_name}}\" via SSH ...": "",
|
||||
|
|
@ -306,10 +318,10 @@
|
|||
"Profile gets or sets the current minikube profile": "",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "",
|
||||
"Pulling base image ...": "",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
|
|
@ -318,8 +330,9 @@
|
|||
"Removing {{.directory}} ...": "",
|
||||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "",
|
||||
"Retrieves the IP address of the running cluster": "",
|
||||
|
|
@ -332,8 +345,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "minikube 명령어를 관리자 권한으로 실행합니다",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "설정이 실패하였습니다",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -359,7 +374,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting node": "노드를 시작하는 중",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다",
|
||||
|
|
@ -373,7 +389,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "{{.name}} 를 {{.cluster}} 에 성공적으로 추가하였습니다!",
|
||||
"Successfully deleted all profiles": "모든 프로필이 성공적으로 삭제되었습니다",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -399,24 +414,32 @@
|
|||
"The argument to pass the minikube mount command on start.": "",
|
||||
"The cluster dns domain name used in the kubernetes cluster": "",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "",
|
||||
"The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "",
|
||||
"The minikube VM is offline. Please run 'minikube start' to start it again.": "",
|
||||
"The name of the network plugin.": "",
|
||||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -424,16 +447,20 @@
|
|||
"The value passed to --format is invalid": "",
|
||||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "",
|
||||
|
|
@ -443,21 +470,31 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "대시보드를 활성화할 수 없습니다",
|
||||
"Unable to fetch latest version info": "최신 버전 정보를 가져올 수 없습니다",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "문서를 생성할 수 없습니다",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "가상 머신 IP 주소를 조회할 수 없습니다",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "현재 사용자를 조회할 수 없습니다",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "런타임을 조회할 수 없습니다",
|
||||
"Unable to get the status of the {{.name}} cluster.": "{{.name}} 클러스터의 상태를 조회할 수 없습니다",
|
||||
"Unable to kill mount process: {{.error}}": "마운트 프로세스를 중지할 수 없습니다: {{.error}}",
|
||||
"Unable to load cached images from config file.": "컨피그 파일로부터 캐시된 이미지를 로드할 수 없습니다",
|
||||
"Unable to load cached images: {{.error}}": "캐시된 이미지를 로드할 수 없습니다: {{.error}}",
|
||||
"Unable to load config: {{.error}}": "컨피그를 로드할 수 없습니다: {{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": " \"{{.kubernetes_version}}\" 를 파싱할 수 없습니다: {{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to remove machine directory: %v": "머신 디렉토리를 제거할 수 없습니다: %v",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "가상 머신을 시작할 수 없습니다. 확인 후 가능하면 'minikube delete' 를 실행하세요",
|
||||
"Unable to stop VM": "가상 머신을 중지할 수 없습니다",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "{{.driver}} 를 수정할 수 없습니다: {{.error}}",
|
||||
|
|
@ -469,6 +506,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Usage": "",
|
||||
"Usage: minikube completion SHELL": "",
|
||||
|
|
@ -488,10 +527,10 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "",
|
||||
"Verifying dashboard health ...": "",
|
||||
|
|
@ -509,7 +548,9 @@
|
|||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "다음 커맨드(들)을 사용하여 제거할 수 있습니다",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "service 이름을 명시해야 합니다",
|
||||
|
|
@ -518,17 +559,17 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "minikube config 가 미지원 드라이버를 참조하고 있습니다. ~/.minikube 를 제거한 후, 다시 시도하세요",
|
||||
"Your minikube vm is not running, try minikube start.": "minikube 가상 머신이 실행 중이 아닙니다, minikube start 를 시도하세요",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "bash 자동 완성이 실패하였습니다",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "config view 가 실패하였습니다",
|
||||
"creating api client": "api 클라이언트 생성 중",
|
||||
"dashboard service is not running: {{.error}}": "대시보드 서비스가 실행 중이지 않습니다: {{.error}}",
|
||||
"deleting node": "",
|
||||
"disable failed": "비활성화가 실패하였습니다",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "dry-run 검증 완료!",
|
||||
|
|
@ -541,14 +582,17 @@
|
|||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"generating join token": "",
|
||||
"getting config": "컨피그 조회 중",
|
||||
"getting primary control plane": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "프로필을 생성하려면 다음 커맨드를 입력하세요: minikube start -p {{.profile_name}}",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 과 minikube 환경 정보는 {{.home_folder}} 에 저장될 것입니다",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl 이 PATH 에 없습니다, 하지만 이는 대시보드에서 필요로 합니다. 설치 가이드:https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
"kubectl proxy": "kubectl 프록시",
|
||||
"libmachine failed": "",
|
||||
"loading config": "컨피그 로딩 중",
|
||||
"logdir set failed": "logdir 설정이 실패하였습니다",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "머신 '{{.name}}' 이 존재하지 않습니다. 진행하기 앞서 가상 머신을 재생성합니다",
|
||||
|
|
@ -556,7 +600,7 @@
|
|||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "",
|
||||
|
|
@ -565,14 +609,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -597,16 +643,17 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "{{.driver}} 가 설치되지 않았습니다",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "{{.name}} 클러스터가 존재하지 않습니다",
|
||||
"{{.name}} has no available configuration options": "{{.name}} 이 사용 가능한 환경 정보 옵션이 없습니다",
|
||||
"{{.name}} is already running": "{{.name}} 이 이미 실행 중입니다",
|
||||
"{{.name}} was successfully configured": "{{.name}} 이 성공적으로 설정되었습니다",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": "{{.url}} 이 접근 불가능합니다: {{.error}}"
|
||||
|
|
|
|||
|
|
@ -1,11 +1,10 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.minikube_addon}}\" was successfully disabled": "\"{{.minikube_addon}}\" został wyłączony",
|
||||
"\"{{.name}}\" profile does not exist": "Profil \"{{.name}}\" nie istnieje",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" VM does not exist, nothing to stop": "Maszyna wirtualna \"{{.profile_name}}\" nie istnieje. Nie można zatrzymać",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "Profil \"{{.profile_name}}\" nie istnieje. Nie można wyświetlić adresu IP ",
|
||||
"\"{{.profile_name}}\" stopped.": "Zatrzymano \"{{.profile_name}}\"",
|
||||
"'none' driver does not support 'minikube docker-env' command": "sterownik 'none' nie wspiera komendy 'minikube docker-env'",
|
||||
|
|
@ -13,7 +12,6 @@
|
|||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "sterownik 'none' nie wspiera komendy 'minikube ssh'",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
|
||||
|
|
@ -32,12 +30,12 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "Zaawansowane komendy",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "Aliasy",
|
||||
"Allow user prompts for more information": "",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Ilość zarezerwowanej pamięci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "Ilość zarezerwowanej pamięci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "Czas oczekiwania na serwis w sekundach",
|
||||
"Amount of time to wait for service in seconds": "Czas oczekiwania na serwis w sekundach",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
|
|
@ -48,6 +46,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "Nie można odnaleźć folderu {{.path}} do zamontowania",
|
||||
"Cannot use both --output and --format options": "",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "",
|
||||
|
|
@ -68,10 +67,11 @@
|
|||
"Could not process errors from failed deletion": "",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "",
|
||||
"Created a new profile : {{.profile_name}}": "Stworzono nowy profil : {{.profile_name}}",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating a new profile failed": "Tworzenie nowego profilu nie powiodło się",
|
||||
"Creating mount {{.name}} ...": "",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Tworzenie {{.driver_name}} (CPUs={{.number_of_cpus}}, Pamięć={{.memory_size}}MB, Dysk={{.disk_size}}MB)...",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "Domyślne id grupy użyte dla montowania",
|
||||
"Default user id used for the mount": "Domyślne id użytkownika użyte dla montowania",
|
||||
|
|
@ -98,9 +98,9 @@
|
|||
"Done! kubectl is now configured to use \"{{.name}}": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".",
|
||||
"Done! kubectl is now configured to use \"{{.name}}\"": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".",
|
||||
"Download complete!": "Pobieranie zakończone!",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "Pobieranie obrazu maszyny wirtualnej ...",
|
||||
"Downloading driver {{.driver}}:": "",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "Pobieranie {{.name}} {{.version}}",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "",
|
||||
|
|
@ -110,7 +110,6 @@
|
|||
"Enable addons. see `minikube addons list` for a list of valid addon names.": "",
|
||||
"Enable experimental NVIDIA GPU support in minikube": "aktywuj eksperymentalne wsparcie minikube dla NVIDIA GPU",
|
||||
"Enable host resolver for NAT DNS requests (virtualbox driver only)": "",
|
||||
"Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "",
|
||||
"Enable proxy for NAT DNS requests (virtualbox driver only)": "",
|
||||
"Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "",
|
||||
"Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "",
|
||||
|
|
@ -131,46 +130,30 @@
|
|||
"Error finding port for mount": "",
|
||||
"Error generating set output": "",
|
||||
"Error generating unset output": "",
|
||||
"Error getting IP": "",
|
||||
"Error getting client": "",
|
||||
"Error getting client: {{.error}}": "",
|
||||
"Error getting cluster": "",
|
||||
"Error getting cluster bootstrapper": "",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "",
|
||||
"Error getting machine logs": "",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting service status": "",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "",
|
||||
"Error loading api": "",
|
||||
"Error loading profile config": "",
|
||||
"Error loading profile config: {{.error}}": "",
|
||||
"Error opening service": "",
|
||||
"Error parsing Driver version: {{.error}}": "Błąd parsowania wersji Driver: {{.error}}",
|
||||
"Error parsing minikube version: {{.error}}": "Błąd parsowania wersji minikube: {{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "Błąd odczytu {{.path}} {{.error}}",
|
||||
"Error restarting cluster": "Błąd podczas restartowania klastra",
|
||||
"Error retrieving node": "",
|
||||
"Error setting shell variables": "Błąd podczas ustawiania zmiennych powłoki(shell)",
|
||||
"Error starting cluster": "Błąd podczas uruchamiania klastra",
|
||||
"Error starting mount": "",
|
||||
"Error starting node": "",
|
||||
"Error while setting kubectl current context : {{.error}}": "Błąd podczas ustawiania kontekstu kubectl: {{.error}}",
|
||||
"Error writing mount pid": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "",
|
||||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".",
|
||||
"Error: [{{.id}}] {{.error}}": "",
|
||||
"Examples": "Przykłady",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "",
|
||||
"Exiting.": "",
|
||||
"External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "",
|
||||
|
|
@ -178,38 +161,38 @@
|
|||
"Failed to cache ISO": "",
|
||||
"Failed to cache and load images": "",
|
||||
"Failed to cache binaries": "",
|
||||
"Failed to cache images": "",
|
||||
"Failed to cache images to tar": "",
|
||||
"Failed to cache kubectl": "",
|
||||
"Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Nie udało się zmienić uprawnień pliku {{.minikube_dir_path}}: {{.error}}",
|
||||
"Failed to check if machine exists": "",
|
||||
"Failed to check main repository and mirrors for images for images": "",
|
||||
"Failed to delete cluster: {{.error}}": "",
|
||||
"Failed to delete images": "",
|
||||
"Failed to delete images from config": "",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to download kubectl": "Pobieranie kubectl nie powiodło się",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "",
|
||||
"Failed to get image map": "",
|
||||
"Failed to get machine client": "",
|
||||
"Failed to get service URL: {{.error}}": "",
|
||||
"Failed to kill mount process: {{.error}}": "Zabicie procesu nie powiodło się: {{.error}}",
|
||||
"Failed to list cached images": "",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "",
|
||||
"Failed to remove profile": "Usunięcie profilu nie powiodło się",
|
||||
"Failed to save config": "Zapisywanie konfiguracji nie powiodło się",
|
||||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
|
||||
"Failed to setup certs": "Konfiguracja certyfikatów nie powiodła się",
|
||||
"Failed to setup kubeconfig": "Konfiguracja kubeconfig nie powiodła się",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "Aktualizacja klastra nie powiodła się",
|
||||
"Failed to update config": "Aktualizacja konfiguracji nie powiodła się",
|
||||
"Failed unmount: {{.error}}": "",
|
||||
"File permissions used for the mount": "",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "",
|
||||
"Follow": "",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
|
|
@ -218,13 +201,16 @@
|
|||
"Force minikube to perform possibly dangerous operations": "Wymuś wykonanie potencjalnie niebezpiecznych operacji",
|
||||
"Found network options:": "Wykryto opcje sieciowe:",
|
||||
"Found {{.number}} invalid profile(s) !": "Wykryto {{.number}} nieprawidłowych profili ! ",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "Pobiera logi z aktualnie uruchomionej instancji. Przydatne do debugowania kodu który nie należy do aplikacji użytkownika",
|
||||
"Gets the status of a local kubernetes cluster": "Pobiera aktualny status klastra kubernetesa",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting machine config failed": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -235,6 +221,7 @@
|
|||
"Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -251,6 +238,8 @@
|
|||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "Nieprawidłowy rozmiar przekazany w argumencie: {{.error}}",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "",
|
||||
|
|
@ -266,6 +255,7 @@
|
|||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "",
|
||||
"Location of the minikube iso": "Ścieżka do obrazu iso minikube",
|
||||
"Location of the minikube iso.": "Ścieżka do obrazu iso minikube",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -283,13 +273,17 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "Liczba procesorów przypisana do maszyny wirtualnej minikube",
|
||||
"Number of CPUs allocated to the minikube VM.": "Liczba procesorów przypisana do maszyny wirtualnej minikube",
|
||||
"Number of lines back to go within the log": "",
|
||||
|
|
@ -312,6 +306,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "Proszę upewnij się, że serwis którego szukasz znajduje się w prawidłowej przestrzeni nazw",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Proszę zaktualizować '{{.driver_executable}}'. {{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -325,11 +320,11 @@
|
|||
"Profile gets or sets the current minikube profile": "Pobiera lub ustawia aktywny profil minikube",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "",
|
||||
"Pulling base image ...": "",
|
||||
"Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "Uruchom ponownie komputer aby zakończyć instalacje VirtualBox'a i upewnij się że nie jest on blokowany przez twój system",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
"Received {{.name}} signal": "",
|
||||
"Reconfiguring existing host ...": "",
|
||||
"Registry mirrors to pass to the Docker daemon": "",
|
||||
"Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "",
|
||||
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
|
||||
|
|
@ -338,8 +333,9 @@
|
|||
"Removing {{.directory}} ...": "",
|
||||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra.",
|
||||
"Retrieves the IP address of the running cluster": "Pobiera adres IP aktualnie uruchomionego klastra",
|
||||
|
|
@ -352,8 +348,10 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "",
|
||||
|
|
@ -379,8 +377,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "Uruchamianie lokalnego klastra kubernetesa",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -393,7 +391,6 @@
|
|||
"Successfully added {{.name}} to {{.cluster}}!": "",
|
||||
"Successfully deleted all profiles": "",
|
||||
"Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "Pomyślnie zamontowano {{.sourcePath}} do {{.destinationPath}}",
|
||||
"Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "",
|
||||
"Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "",
|
||||
"Suggestion: {{.advice}}": "Sugestia: {{.advice}}",
|
||||
"Suggestion: {{.fix}}": "",
|
||||
|
|
@ -423,12 +420,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "Domena dns clastra użyta przez kubernetesa",
|
||||
"The container runtime to be used (docker, crio, containerd)": "Runtime konteneryzacji (docker, crio, containerd).",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service is currently not active": "Serwis docker jest nieaktywny",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "Sterownik '{{.driver}}' jest niewspierany przez system {{.os}}",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -440,10 +441,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -452,17 +457,21 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "Wartość przekazana do --format jest nieprawidłowa: {{.error}}",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} nie powinien być używany z przywilejami root'a.",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "Aby połączyć się z klastrem użyj: kubectl --context={{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "Aby połączyć się z klastrem użyj: kubectl --context={{.profile_name}}",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'": "Aby wyłączyć te notyfikację, użyj: 'minikube config set WantUpdateNotification false'",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "Aby uruchomić minikube z HyperV Powershell musi znajdować się w zmiennej PATH",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "",
|
||||
|
|
@ -472,23 +481,30 @@
|
|||
"Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM": "Nie można uruchomić maszyny wirtualnej",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to stop VM": "Nie można zatrzymać maszyny wirtualnej",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
"Unable to verify SSH connectivity: {{.error}}. Will retry...": "",
|
||||
|
|
@ -499,6 +515,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Usage": "",
|
||||
"Usage: minikube completion SHELL": "",
|
||||
|
|
@ -518,11 +536,11 @@
|
|||
"Userspace file server:": "",
|
||||
"Using image repository {{.name}}": "",
|
||||
"Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "",
|
||||
"Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "",
|
||||
"Using the {{.driver}} driver based on existing profile": "",
|
||||
"Using the {{.driver}} driver based on user configuration": "",
|
||||
"VM driver is one of: %v": "Sterownik wirtualnej maszyny to jeden z: %v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "Weryfikuję czy zmienne HTTP_PROXY i HTTPS_PROXY są ustawione poprawnie",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "Weryfikuję adres IP działającego klastra w kubeconfig",
|
||||
"Verifying dashboard health ...": "Weryfikuję status dashboardu",
|
||||
|
|
@ -538,12 +556,13 @@
|
|||
"Wait failed": "",
|
||||
"Wait failed: {{.error}}": "",
|
||||
"Waiting for SSH access ...": "Oczekiwanie na połączenie SSH...",
|
||||
"Waiting for cluster to come online ...": "",
|
||||
"Waiting for:": "Oczekiwanie na :",
|
||||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "Musisz podać nazwę serwisu",
|
||||
|
|
@ -552,45 +571,43 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "Nie udało się otworzyć przeglądarki: {{.error}}",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "konfiguracja minikube i kubectl będzie przechowywana w katalogu {{.home_folder}}",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl nie zostało odnaleźione w zmiennej środowiskowej ${PATH}. Instrukcja instalacji: https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
"minikube {{.version}} is available! Download it: {{.url}}": "minikube {{.version}} jest dostępne! Pobierz je z: {{.url}}",
|
||||
|
|
@ -599,14 +616,16 @@
|
|||
"mount failed": "Montowanie się nie powiodło",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "",
|
||||
"reloads images previously added using the 'cache add' subcommand": "",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -633,16 +652,17 @@
|
|||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.addonName}} was successfully enabled": "{{.addonName}} został aktywowany pomyślnie",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "Klaster {{.name}} nie istnieje",
|
||||
"{{.name}} has no available configuration options": "{{.name}} nie posiada opcji configuracji",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "{{.name}} skonfigurowano pomyślnie",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} na {{.platform}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "{{.type}} nie jest wspierany przez system plików. I tak spróbujemy!",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
|
|
@ -1,12 +1,11 @@
|
|||
{
|
||||
"\"The '{{.minikube_addon}}' addon is disabled": "",
|
||||
"\"{{.machineName}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.minikube_addon}}\" was successfully disabled": "已成功禁用 \"{{.minikube_addon}}\"",
|
||||
"\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "\"{{.name}}\" 集群不存在,将继续清理",
|
||||
"\"{{.name}}\" profile does not exist": "“{{.name}}”配置文件不存在",
|
||||
"\"{{.name}}\" profile does not exist, trying anyways.": "",
|
||||
"\"{{.node_name}}\" stopped.": "",
|
||||
"\"{{.profile_name}}\" VM does not exist, nothing to stop": "\"{{.profile_name}}\" 虚拟机不存在,没有什么可供停止的",
|
||||
"\"{{.profile_name}}\" does not exist, nothing to stop": "",
|
||||
"\"{{.profile_name}}\" host does not exist, unable to show an IP": "\"{{.profile_name}}\" 主机不存在,无法显示其IP",
|
||||
"\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" 已停止",
|
||||
"'none' driver does not support 'minikube docker-env' command": "'none' 驱动不支持 'minikube docker-env' 命令",
|
||||
|
|
@ -14,7 +13,6 @@
|
|||
"'none' driver does not support 'minikube podman-env' command": "",
|
||||
"'none' driver does not support 'minikube ssh' command": "'none' 驱动不支持 'minikube ssh' 命令",
|
||||
"'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 驱动程序报告了一个问题: {{.error}}",
|
||||
"'{{.profile}}' is not running": "",
|
||||
"- {{.profile}}": "",
|
||||
"A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "VPN 或者防火墙正在干扰对 minikube 虚拟机的 HTTP 访问。或者,您可以使用其它的虚拟机驱动:https://minikube.sigs.k8s.io/docs/start/",
|
||||
"A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网。您可能需要对其进行配置为使用代理",
|
||||
|
|
@ -37,12 +35,12 @@
|
|||
"Adds a node to the given cluster config, and starts it.": "",
|
||||
"Adds a node to the given cluster.": "",
|
||||
"Advanced Commands:": "高级命令:",
|
||||
"After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Aliases": "别名",
|
||||
"Allow user prompts for more information": "允许用户提示以获取更多信息",
|
||||
"Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "用于从中拉取 docker 镜像的备选镜像存储库。如果您对 gcr.io 的访问受到限制,则可以使用该镜像存储库。将镜像存储库设置为“auto”可让 minikube 为您选择一个存储库。对于中国大陆用户,您可以使用本地 gcr.io 镜像,例如 registry.cn-hangzhou.aliyuncs.com/google_containers",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)",
|
||||
"Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)。",
|
||||
"Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "",
|
||||
"Amount of time to wait for a service in seconds": "等待服务的时间(单位秒)",
|
||||
"Amount of time to wait for service in seconds": "等待服务的时间(单位秒)",
|
||||
"Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "",
|
||||
|
|
@ -56,6 +54,7 @@
|
|||
"Because you are using docker driver on Mac, the terminal needs to be open to run it.": "",
|
||||
"Bind Address: {{.Address}}": "",
|
||||
"Block until the apiserver is servicing API requests": "阻塞直到 apiserver 为 API 请求提供服务",
|
||||
"Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "",
|
||||
"Cannot find directory {{.path}} for mount": "找不到用来挂载的 {{.path}} 目录",
|
||||
"Cannot use both --output and --format options": "不能同时使用 --output 和 --format 选项",
|
||||
"Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "检查 'journalctl -xeu kubelet' 的输出,尝试启动 minikube 时添加参数 --extra-config=kubelet.cgroup-driver=systemd",
|
||||
|
|
@ -80,11 +79,12 @@
|
|||
"Could not process errors from failed deletion": "无法处理删除失败的错误",
|
||||
"Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "需要使用的镜像镜像的国家/地区代码。留空以使用全球代码。对于中国大陆用户,请将其设置为 cn。",
|
||||
"Created a new profile : {{.profile_name}}": "创建了新的配置文件:{{.profile_name}}",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "正在 {{.driver_name}} 容器中 创建 Kubernetes,(CPUs={{.number_of_cpus}}), 内存={{.memory_size}}MB ({{.host_memory_size}}MB 可用",
|
||||
"Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "",
|
||||
"Creating a new profile failed": "创建新的配置文件失败",
|
||||
"Creating mount {{.name}} ...": "正在创建装载 {{.name}}…",
|
||||
"Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "正在创建 {{.driver_name}} 虚拟机(CPUs={{.number_of_cpus}},Memory={{.memory_size}}MB, Disk={{.disk_size}}MB)...",
|
||||
"Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"DEPRECATED, use `driver` instead.": "",
|
||||
"Default group id used for the mount": "用于挂载默认的 group id",
|
||||
"Default user id used for the mount": "用于挂载默认的 user id",
|
||||
|
|
@ -112,9 +112,9 @@
|
|||
"Done! kubectl is now configured to use \"{{.name}}\"": "完成!kubectl 已经配置至 \"{{.name}}\"",
|
||||
"Done! kubectl is now configured to use {{.name}}": "完成!kubectl已经配置至{{.name}}",
|
||||
"Download complete!": "下载完成!",
|
||||
"Downloading Kubernetes {{.version}} preload ...": "",
|
||||
"Downloading VM boot image ...": "正在下载 VM boot image...",
|
||||
"Downloading driver {{.driver}}:": "正在下载驱动 {{.driver}}:",
|
||||
"Downloading preloaded images tarball for k8s {{.version}} ...": "",
|
||||
"Downloading {{.name}} {{.version}}": "正在下载 {{.name}} {{.version}}",
|
||||
"ERROR creating `registry-creds-acr` secret": "",
|
||||
"ERROR creating `registry-creds-dpr` secret": "创建 `registry-creds-dpr` secret 时出错",
|
||||
|
|
@ -162,21 +162,17 @@
|
|||
"Error getting cluster bootstrapper": "获取 cluster bootstrapper 时出错",
|
||||
"Error getting cluster config": "",
|
||||
"Error getting config": "获取 config 时出错",
|
||||
"Error getting control plane": "",
|
||||
"Error getting host": "获取 host 时出错",
|
||||
"Error getting host IP": "",
|
||||
"Error getting host status": "获取 host status 时出错",
|
||||
"Error getting machine logs": "获取 machine logs 时出错",
|
||||
"Error getting machine status": "获取 machine status 时出错",
|
||||
"Error getting port binding for '{{.driver_name}} driver: {{.error}}": "",
|
||||
"Error getting primary control plane": "",
|
||||
"Error getting primary cp": "",
|
||||
"Error getting profiles to delete": "获取待删除配置文件时出错",
|
||||
"Error getting service status": "获取 service status 时出错",
|
||||
"Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "使用 namespace: {{.namespace}} 和 labels {{.labelName}}:{{.addonName}} 获取 service 时出错:{{.error}}",
|
||||
"Error getting ssh client": "",
|
||||
"Error getting the host IP address to use from within the VM": "从虚拟机中获取 host IP 地址时出错",
|
||||
"Error host driver ip status": "",
|
||||
"Error killing mount process": "杀死 mount 进程时出错",
|
||||
"Error loading api": "加载 api 时出错",
|
||||
"Error loading profile config": "加载配置文件的配置时出错",
|
||||
|
|
@ -187,11 +183,9 @@
|
|||
"Error parsing minikube version: {{.error}}": "解析 minikube 版本时出错:{{.error}}",
|
||||
"Error reading {{.path}}: {{.error}}": "读取 {{.path}} 时出错:{{.error}}",
|
||||
"Error restarting cluster": "重启 cluster 时出错",
|
||||
"Error retrieving node": "",
|
||||
"Error setting shell variables": "设置 shell 变量时出错",
|
||||
"Error starting cluster": "开启 cluster 时出错",
|
||||
"Error starting mount": "开启 mount 时出错",
|
||||
"Error starting node": "",
|
||||
"Error unsetting shell variables": "取消设置 shell 变量时出错",
|
||||
"Error while setting kubectl current context : {{.error}}": "设置 kubectl 上下文时出错 :{{.error}}",
|
||||
"Error writing mount pid": "写入 mount pid 时出错",
|
||||
|
|
@ -199,6 +193,7 @@
|
|||
"Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "错误:您已选择 Kubernetes v{{.new}},但您的配置文件的现有集群正在运行 Kubernetes v{{.old}}。非破坏性降级不受支持,但若要继续操作,您可以执行以下选项之一:\n* 使用 Kubernetes v{{.new}} 重新创建现有集群:运行“minikube delete {{.profile}}”,然后运行“minikube start {{.profile}} --kubernetes-version={{.new}}”\n* 使用 Kubernetes v{{.new}} 再创建一个集群:运行“minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}”\n* 通过 Kubernetes v{{.old}} 或更高版本重复使用现有集群:运行“minikube start {{.profile}} --kubernetes-version={{.old}}”",
|
||||
"Error: [{{.id}}] {{.error}}": "错误:[{{.id}}] {{.error}}",
|
||||
"Examples": "示例",
|
||||
"Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "",
|
||||
"Exiting": "正在退出",
|
||||
"Exiting due to driver incompatibility": "由于驱动程序不兼容而退出",
|
||||
"Exiting.": "正在退出。",
|
||||
|
|
@ -217,10 +212,10 @@
|
|||
"Failed to delete cluster: {{.error}}__1": "未能删除集群:{{.error}}",
|
||||
"Failed to delete images": "删除镜像时失败",
|
||||
"Failed to delete images from config": "无法删除配置的镜像",
|
||||
"Failed to delete node {{.name}}": "",
|
||||
"Failed to download kubectl": "下载 kubectl 失败",
|
||||
"Failed to enable container runtime": "",
|
||||
"Failed to generate config": "无法生成配置",
|
||||
"Failed to get API Server URL": "",
|
||||
"Failed to get bootstrapper": "获取 bootstrapper 失败",
|
||||
"Failed to get command runner": "",
|
||||
"Failed to get driver URL": "获取 driver URL 失败",
|
||||
|
|
@ -229,6 +224,7 @@
|
|||
"Failed to get service URL: {{.error}}": "获取 service URL 失败:{{.error}}",
|
||||
"Failed to kill mount process: {{.error}}": "未能终止装载进程:{{.error}}",
|
||||
"Failed to list cached images": "无法列出缓存镜像",
|
||||
"Failed to parse kubernetes version": "",
|
||||
"Failed to reload cached images": "重新加载缓存镜像失败",
|
||||
"Failed to remove profile": "无法删除配置文件",
|
||||
"Failed to save config": "无法保存配置",
|
||||
|
|
@ -236,12 +232,12 @@
|
|||
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "未能设置 NO_PROXY 环境变量。请使用“export NO_PROXY=$NO_PROXY,{{.ip}}”。",
|
||||
"Failed to setup certs": "设置 certs 失败",
|
||||
"Failed to setup kubeconfig": "设置 kubeconfig 失败",
|
||||
"Failed to start node {{.name}}": "",
|
||||
"Failed to stop node {{.name}}": "",
|
||||
"Failed to update cluster": "更新 cluster 失败",
|
||||
"Failed to update config": "更新 config 失败",
|
||||
"Failed unmount: {{.error}}": "unmount 失败:{{.error}}",
|
||||
"File permissions used for the mount": "用于 mount 的文件权限",
|
||||
"Filter to use only VM Drivers": "",
|
||||
"Flags": "标志",
|
||||
"Follow": "跟踪",
|
||||
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "为获得最佳结果,请安装 kubectl:https://kubernetes.io/docs/tasks/tools/install-kubectl/",
|
||||
|
|
@ -251,13 +247,17 @@
|
|||
"Force minikube to perform possibly dangerous operations": "强制 minikube 执行可能有风险的操作",
|
||||
"Found network options:": "找到的网络选项:",
|
||||
"Found {{.number}} invalid profile(s) !": "找到 {{.number}} 个无效的配置文件!",
|
||||
"Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Generate unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster": "获取本地集群中指定服务的 kubernetes URL",
|
||||
"Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "获取本地集群中指定服务的 kubernetes URL。如果有多个 URL,他们将一次打印一个",
|
||||
"Gets the logs of the running instance, used for debugging minikube, not user code.": "获取正在运行的实例日志,用于调试 minikube,不是用户代码",
|
||||
"Gets the status of a local kubernetes cluster": "获取本地 kubernetes 集群状态",
|
||||
"Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "",
|
||||
"Gets the value of PROPERTY_NAME from the minikube config file": "",
|
||||
"Getting bootstrapper": "",
|
||||
"Getting machine config failed": "获取机器配置失败",
|
||||
"Getting primary control plane": "",
|
||||
"Global Flags": "",
|
||||
"Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "",
|
||||
"Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "",
|
||||
|
|
@ -270,6 +270,7 @@
|
|||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "",
|
||||
"Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --vm-driver": "Hyperkit 的网络挂了。升级到最新的 hyperkit 版本以及/或者 Docker 桌面版。或者,你可以通过 --vm-driver 切换其他选项",
|
||||
"If set, automatically updates drivers to the latest version. Defaults to true.": "如果设置了,将自动更新驱动到最新版本。默认为 true。",
|
||||
"If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "",
|
||||
"If set, install addons. Defaults to true.": "",
|
||||
"If set, pause all namespaces": "",
|
||||
"If set, unpause all namespaces": "",
|
||||
|
|
@ -286,12 +287,12 @@
|
|||
"Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "传递给 Docker 守护进程的不安全 Docker 注册表。系统会自动添加默认服务 CIDR 范围。",
|
||||
"Install VirtualBox, or select an alternative value for --driver": "",
|
||||
"Install the latest hyperkit binary, and run 'minikube delete'": "",
|
||||
"Invalid size passed in argument: {{.error}}": "",
|
||||
"IsEnabled failed": "",
|
||||
"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "",
|
||||
"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "",
|
||||
"Kill the mount process spawned by minikube start": "",
|
||||
"Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "Kubernetes {{.new}} 现在可用了。如果您想升级,请指定 --kubernetes-version={{.new}}",
|
||||
"Kubernetes {{.version}} is not supported by this release of minikube": "当前版本的 minukube 不支持 Kubernetes {{.version}}",
|
||||
"Launching Kubernetes ...": "",
|
||||
"Launching Kubernetes ... ": "正在启动 Kubernetes ... ",
|
||||
"Launching proxy ...": "",
|
||||
"List all available images from the local cache.": "",
|
||||
|
|
@ -303,7 +304,7 @@
|
|||
"Local folders to share with Guest via NFS mounts (hyperkit driver only)": "通过 NFS 装载与访客共享的本地文件夹(仅限 hyperkit 驱动程序)",
|
||||
"Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "用于网络连接的 VPNKit 套接字的位置。如果为空,则停用 Hyperkit VPNKitSock;如果为“auto”,则将 Docker 用于 Mac VPNKit 连接;否则使用指定的 VSock(仅限 hyperkit 驱动程序)",
|
||||
"Location of the minikube iso": "minikube iso 的位置",
|
||||
"Location of the minikube iso.": "",
|
||||
"Locations to fetch the minikube ISO from.": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "",
|
||||
"Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "",
|
||||
"Message Size: {{.size}}": "",
|
||||
|
|
@ -321,15 +322,18 @@
|
|||
"NOTE: This process must stay alive for the mount to be accessible ...": "",
|
||||
"Networking and Connectivity Commands:": "网络和连接命令:",
|
||||
"No minikube profile was found. You can create one using `minikube start`.": "",
|
||||
"Node \"{{.node_name}}\" stopped.": "",
|
||||
"Node may be unable to resolve external DNS records": "",
|
||||
"Node operations": "",
|
||||
"Node {{.name}} was successfully deleted.": "",
|
||||
"Node {{.nodeName}} does not exist.": "",
|
||||
"Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "",
|
||||
"None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "您所在位置的已知存储库都无法访问。正在将 {{.image_repository_name}} 用作后备存储库。",
|
||||
"None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "已知存储库都无法访问。请考虑使用 --image-repository 标志指定备选镜像存储库",
|
||||
"Not passing {{.name}}={{.value}} to docker env.": "",
|
||||
"Noticed that you are using minikube docker-env:": "",
|
||||
"Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "",
|
||||
"Number of CPUs allocated to Kubernetes.": "",
|
||||
"Number of CPUs allocated to the minikube VM": "分配给 minikube 虚拟机的 CPU 的数量",
|
||||
"Number of CPUs allocated to the minikube VM.": "",
|
||||
"Number of lines back to go within the log": "",
|
||||
"OS release is {{.pretty_name}}": "",
|
||||
"Open the addons URL with https instead of http": "",
|
||||
|
|
@ -350,6 +354,7 @@
|
|||
"Please install the minikube hyperkit VM driver, or select an alternative --driver": "",
|
||||
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
|
||||
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
|
||||
"Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "",
|
||||
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
|
||||
"Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "请升级“{{.driver_executable}}”。{{.documentation_url}}",
|
||||
"Populates the specified folder with documentation in markdown about minikube": "",
|
||||
|
|
@ -363,6 +368,7 @@
|
|||
"Profile gets or sets the current minikube profile": "获取或设置当前的 minikube 配置文件",
|
||||
"Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "配置文件名称 \"{{.profilename}}\" 是 minikube 的一个关键字。使用 minikube delete -p \u003cprofile name\u003e 命令 删除配置文件",
|
||||
"Provide VM UUID to restore MAC address (hyperkit driver only)": "提供虚拟机 UUID 以恢复 MAC 地址(仅限 hyperkit 驱动程序)",
|
||||
"Pulling base image ...": "",
|
||||
"Pulling images ...": "拉取镜像 ...",
|
||||
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "重启以完成 VirtualBox 安装,检查 VirtualBox 未被您的操作系统禁用,或者使用其他的管理程序。",
|
||||
"Rebuild libvirt with virt-network support": "",
|
||||
|
|
@ -378,7 +384,10 @@
|
|||
"Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "请求的 CPU 数量 {{.requested_cpus}} 小于允许的最小值 {{.minimum_cpus}}",
|
||||
"Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "请求的磁盘大小 {{.requested_size}} 小于最小值 {{.minimum_size}}",
|
||||
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "请求的内存分配 ({{.memory}}MB) 小于默认内存分配 {{.default_memorysize}}MB。请注意 minikube 可能无法正常运行或可能会意外崩溃。",
|
||||
"Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "",
|
||||
"Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "请求的内存分配 {{.requested_size}} 小于允许的 {{.minimum_size}} 最小值",
|
||||
"Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "",
|
||||
"Retarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "",
|
||||
"Retrieve the ssh identity key path of the specified cluster": "检索指定集群的 ssh 密钥路径",
|
||||
"Retrieve the ssh identity key path of the specified cluster.": "检索指定集群的 ssh 密钥路径。",
|
||||
"Retrieves the IP address of the running cluster": "检索正在运行的群集的 IP 地址",
|
||||
|
|
@ -391,10 +400,12 @@
|
|||
"Run minikube from the C: drive.": "",
|
||||
"Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "",
|
||||
"Run the minikube command as an Administrator": "",
|
||||
"Run: \"{{.delete}}\", then \"{{.start}} --alsologtostderr -v=1\" to try again with more logging": "",
|
||||
"Run: 'chmod 600 $HOME/.kube/config'": "执行 'chmod 600 $HOME/.kube/config'",
|
||||
"Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "",
|
||||
"Selecting '{{.driver}}' driver from existing profile (alternates: {{.alternates}})": "从现有配置文件中选择 '{{.driver}}' 驱动程序 (可选:{{.alternates}})",
|
||||
"Selecting '{{.driver}}' driver from user configuration (alternates: {{.alternates}})": "从用户配置中选择 {{.driver}}' 驱动程序(可选:{{.alternates}})",
|
||||
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",
|
||||
"Set failed": "",
|
||||
"Set flag to delete all profiles": "设置标志以删除所有配置文件",
|
||||
"Set this flag to delete the '.minikube' folder from your user directory.": "设置这个标志来删除您用户目录下的 '.minikube' 文件夹。",
|
||||
|
|
@ -420,8 +431,8 @@
|
|||
"Specify the 9p version that the mount should use": "",
|
||||
"Specify the ip that the mount should be setup on": "",
|
||||
"Specify the mount filesystem type (supported types: 9p)": "",
|
||||
"Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "",
|
||||
"Starting node": "",
|
||||
"StartHost failed again: {{.error}}": "",
|
||||
"StartHost failed, but will try again: {{.error}}": "",
|
||||
"Starting tunnel for service {{.service}}.": "",
|
||||
"Starts a local kubernetes cluster": "启动本地 kubernetes 集群",
|
||||
"Starts a node.": "",
|
||||
|
|
@ -469,12 +480,16 @@
|
|||
"The cluster dns domain name used in the kubernetes cluster": "kubernetes 集群中使用的集群 dns 域名",
|
||||
"The container runtime to be used (docker, crio, containerd)": "需要使用的容器运行时(docker、crio、containerd)",
|
||||
"The container runtime to be used (docker, crio, containerd).": "",
|
||||
"The control plane for \"{{.name}}\" is paused!": "",
|
||||
"The control plane node \"{{.name}}\" does not exist.": "",
|
||||
"The control plane node is not running (state={{.state}})": "",
|
||||
"The control plane node must be running for this command": "",
|
||||
"The cri socket path to be used": "需要使用的 cri 套接字路径",
|
||||
"The cri socket path to be used.": "",
|
||||
"The docker service within '{{.profile}}' is not active": "",
|
||||
"The docker service within '{{.name}}' is not active": "",
|
||||
"The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "",
|
||||
"The driver '{{.driver}}' is not supported on {{.os}}": "{{.os}} 不支持驱动程序“{{.driver}}”",
|
||||
"The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "",
|
||||
"The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "",
|
||||
"The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "",
|
||||
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 虚拟交换机名称。默认为找到的第一个 hyperv 虚拟交换机。(仅限 hyperv 驱动程序)",
|
||||
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "管理程序似乎配置的不正确。执行 'minikube start --alsologtostderr -v=1' 并且检查错误代码",
|
||||
"The initial time interval for each check that wait performs in seconds": "",
|
||||
|
|
@ -486,10 +501,14 @@
|
|||
"The name of the node to delete": "",
|
||||
"The name of the node to start": "",
|
||||
"The node to get logs from. Defaults to the primary control plane.": "",
|
||||
"The node to ssh into. Defaults to the primary control plane.": "",
|
||||
"The none driver is not compatible with multi-node clusters.": "",
|
||||
"The none driver requires conntrack to be installed for kubernetes version {{.k8sVersion}}": "",
|
||||
"The number of bytes to use for 9p packet payload": "",
|
||||
"The number of nodes to spin up. Defaults to 1.": "",
|
||||
"The output format. One of 'json', 'table'": "输出的格式。'json' 或者 'table'",
|
||||
"The path on the file system where the docs in markdown need to be saved": "",
|
||||
"The podman service within '{{.profile}}' is not active": "",
|
||||
"The podman service within '{{.cluster}}' is not active": "",
|
||||
"The service namespace": "",
|
||||
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
|
||||
"The services namespace": "",
|
||||
|
|
@ -498,19 +517,24 @@
|
|||
"The value passed to --format is invalid: {{.error}}": "",
|
||||
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
|
||||
"The {{.driver_name}} driver should not be used with root privileges.": "不应以根权限使用 {{.driver_name}} 驱动程序。",
|
||||
"There is no local cluster named \"{{.cluster}}\"": "",
|
||||
"There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "“{{.driver_executable}}”有一个新版本。请考虑升级。{{.documentation_url}}",
|
||||
"These changes will take effect upon a minikube delete and then a minikube start": "",
|
||||
"This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "",
|
||||
"This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "此操作还可通过设置环境变量 CHANGE_MINIKUBE_NONE_USER=true 自动完成",
|
||||
"This control plane is not running! (state={{.state}})": "",
|
||||
"This is unusual - you may want to investigate using \"{{.command}}\"": "",
|
||||
"This will keep the existing kubectl context and will create a minikube context.": "这将保留现有 kubectl 上下文并创建 minikube 上下文。",
|
||||
"This will start the mount daemon and automatically mount files into minikube": "这将启动装载守护进程并将文件自动装载到 minikube 中",
|
||||
"This will start the mount daemon and automatically mount files into minikube.": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "",
|
||||
"Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "提示:要移除这个由根用户拥有的集群,请运行 sudo {{.cmd}} delete",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}": "如需连接到此集群,请使用 kubectl --context={{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.name}}__1": "如需连接到此集群,请使用 kubectl --context={{.name}}",
|
||||
"To connect to this cluster, use: kubectl --context={{.profile_name}}": "",
|
||||
"To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "",
|
||||
"To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "",
|
||||
"To fix this, run: {{.command}}": "",
|
||||
"To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "",
|
||||
"To see addons list for other profiles use: `minikube addons -p name list`": "",
|
||||
"To start minikube with HyperV Powershell must be in your PATH`": "",
|
||||
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "如需以您自己的用户身份使用 kubectl 或 minikube 命令,您可能需要重新定位该命令。例如,如需覆盖您的自定义设置,请运行:",
|
||||
|
|
@ -521,23 +545,32 @@
|
|||
"Unable to determine a default driver to use. Try specifying --vm-driver, or see https://minikube.sigs.k8s.io/docs/start/": "无法确定要使用的默认驱动。尝试通过 --vm-dirver 指定,或者查阅 https://minikube.sigs.k8s.io/docs/start/",
|
||||
"Unable to enable dashboard": "",
|
||||
"Unable to fetch latest version info": "",
|
||||
"Unable to find control plane": "",
|
||||
"Unable to generate docs": "",
|
||||
"Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "",
|
||||
"Unable to get VM IP address": "",
|
||||
"Unable to get addon status for {{.name}}: {{.error}}": "",
|
||||
"Unable to get bootstrapper: {{.error}}": "无法获取引导程序:{{.error}}",
|
||||
"Unable to get command runner": "",
|
||||
"Unable to get control plane status: {{.error}}": "",
|
||||
"Unable to get current user": "",
|
||||
"Unable to get driver IP": "",
|
||||
"Unable to get machine status": "",
|
||||
"Unable to get runtime": "",
|
||||
"Unable to get the status of the {{.name}} cluster.": "无法获取 {{.name}} 集群状态。",
|
||||
"Unable to kill mount process: {{.error}}": "",
|
||||
"Unable to load cached images from config file.": "无法从配置文件中加载缓存的镜像。",
|
||||
"Unable to load cached images: {{.error}}": "",
|
||||
"Unable to load config: {{.error}}": "无法加载配置:{{.error}}",
|
||||
"Unable to load host": "",
|
||||
"Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "无法解析“{{.kubernetes_version}}”:{{.error}}",
|
||||
"Unable to parse default Kubernetes version from constants: {{.error}}": "无法从常量中解析默认的 Kubernetes 版本号: {{.error}}",
|
||||
"Unable to parse memory '{{.memory}}': {{.error}}": "",
|
||||
"Unable to parse oldest Kubernetes version from constants: {{.error}}": "无法从常量中解析最旧的 Kubernetes 版本号: {{.error}}",
|
||||
"Unable to pull images, which may be OK: {{.error}}": "无法拉取镜像,有可能是正常状况:{{.error}}",
|
||||
"Unable to remove machine directory: %v": "",
|
||||
"Unable to remove machine directory": "",
|
||||
"Unable to restart cluster, will reset it: {{.error}}": "",
|
||||
"Unable to start VM after repeated tries. Please try {{'minikube delete' if possible": "",
|
||||
"Unable to start VM. Please investigate and run 'minikube delete' if possible": "无法启动虚拟机。可能的话请检查后执行 'minikube delete'",
|
||||
"Unable to stop VM": "无法停止虚拟机",
|
||||
"Unable to update {{.driver}} driver: {{.error}}": "",
|
||||
|
|
@ -549,6 +582,8 @@
|
|||
"Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "",
|
||||
"Unset variables instead of setting them": "",
|
||||
"Update server returned an empty list": "",
|
||||
"Updating node": "",
|
||||
"Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "",
|
||||
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
|
||||
"Upgrading from Kubernetes {{.old}} to {{.new}}": "正在从 Kubernetes {{.old}} 升级到 {{.new}}",
|
||||
"Usage": "使用方法",
|
||||
|
|
@ -575,6 +610,7 @@
|
|||
"VM driver is one of: %v": "虚拟机驱动程序是以下项之一:%v",
|
||||
"VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "虚拟机无权访问 {{.repository}},或许您需要配置代理或者设置 --image-repository",
|
||||
"VM may be unable to resolve external DNS records": "虚拟机可能无法解析外部 DNS 记录",
|
||||
"Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "",
|
||||
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "验证是否正确设置了 HTTP_PROXY 和 HTTPS_PROXY 环境变量。",
|
||||
"Verify the IP address of the running cluster in kubeconfig.": "在 kubeconfig 中验证正在运行的集群 IP 地址。",
|
||||
"Verifying dashboard health ...": "正在验证 dashboard 运行情况 ...",
|
||||
|
|
@ -597,7 +633,9 @@
|
|||
"Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共享的根目录位置,默认为 /nfsshares(仅限 hyperkit 驱动程序)",
|
||||
"Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "",
|
||||
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "您似乎正在使用代理,但您的 NO_PROXY 环境不包含 minikube IP ({{.ip_address}})。如需了解详情,请参阅 {{.documentation_url}}",
|
||||
"You can also use 'minikube kubectl -- get pods' to invoke a matching version": "",
|
||||
"You can delete them using the following command(s):": "",
|
||||
"You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "",
|
||||
"You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "您可能需要从管理程序中手动移除“{{.name}}”虚拟机",
|
||||
"You may need to stop the Hyper-V Manager and run `minikube delete` again.": "",
|
||||
"You must specify a service name": "",
|
||||
|
|
@ -606,46 +644,44 @@
|
|||
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
|
||||
"Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "",
|
||||
"Your minikube vm is not running, try minikube start.": "",
|
||||
"adding node": "",
|
||||
"addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "",
|
||||
"addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
|
||||
"addon enable failed": "启用插件失败",
|
||||
"addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "插件使用诸如 \"minikube addons enable dashboard\" 的子命令修改 minikube 的插件文件",
|
||||
"api load": "",
|
||||
"bash completion failed": "",
|
||||
"call with cleanup=true to remove old tunnels": "",
|
||||
"command runner": "",
|
||||
"config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "",
|
||||
"config view failed": "",
|
||||
"creating api client": "",
|
||||
"dashboard service is not running: {{.error}}": "",
|
||||
"deleting node": "",
|
||||
"disable failed": "禁用失败",
|
||||
"dry-run mode. Validates configuration, but does not mutate system state": "",
|
||||
"dry-run validation complete!": "",
|
||||
"enable failed": "开启失败",
|
||||
"error creating clientset": "",
|
||||
"error creating machine client": "",
|
||||
"error getting primary control plane": "",
|
||||
"error getting ssh port": "",
|
||||
"error parsing the input ip address for mount": "",
|
||||
"error starting tunnel": "",
|
||||
"error stopping tunnel": "",
|
||||
"failed to open browser: {{.error}}": "",
|
||||
"getting config": "",
|
||||
"getting primary control plane": "",
|
||||
"generating join token": "",
|
||||
"if true, will embed the certs in kubeconfig.": "",
|
||||
"if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "",
|
||||
"initialization failed, will try again: {{.error}}": "",
|
||||
"joining cluster": "",
|
||||
"kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "kubeadm 检测一个到与其他进程的 TCP 端口冲突:或许是另外的本地安装的 Kubernetes 导致。执行 lsof -p\u003cport\u003e 查找并杀死这些进程",
|
||||
"kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 和 minikube 配置将存储在 {{.home_folder}} 中",
|
||||
"kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
|
||||
"kubectl proxy": "",
|
||||
"loading config": "",
|
||||
"libmachine failed": "",
|
||||
"logdir set failed": "",
|
||||
"machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "",
|
||||
"max time to wait per Kubernetes core services to be healthy.": "每个 Kubernetes 核心服务保持健康所需的最长时间。",
|
||||
"minikube addons list --output OUTPUT. json, list": "",
|
||||
"minikube is exiting due to an error. If the above message is not useful, open an issue:": "由于出错 minikube 正在退出。如果以上信息没有帮助,请提交问题反馈:",
|
||||
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "",
|
||||
"minikube is unable to connect to the VM: {{.error}}\n\nThis is likely due to one of two reasons:\n\n- VPN or firewall interference\n- {{.hypervisor}} network configuration issue\n\nSuggested workarounds:\n\n- Disable your local VPN or firewall software\n- Configure your local VPN or firewall to allow access to {{.ip}}\n- Restart or reinstall {{.hypervisor}}\n- Use an alternative --vm-driver": "minikube 无法连接到虚拟机:{{.error}}\n\n可能是由于以下两个原因之一导致:\n\n-VPN 或防火墙冲突\n- {{.hypervisor}} 网络配置问题\n建议的方案:\n\n- 禁用本地的 VPN 或者防火墙软件\n- 配置本地 VPN 或防火墙软件,放行 {{.ip}}\n- 重启或者重装 {{.hypervisor}}\n- 使用另外的 --vm-driver",
|
||||
"minikube profile was successfully set to {{.profile_name}}": "",
|
||||
"minikube status --output OUTPUT. json, text": "",
|
||||
|
|
@ -655,14 +691,16 @@
|
|||
"mount failed": "",
|
||||
"namespaces to pause": "",
|
||||
"namespaces to unpause": "",
|
||||
"none driver does not support multi-node clusters": "",
|
||||
"not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "",
|
||||
"pause containers": "暂停容器",
|
||||
"profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "",
|
||||
"profile {{.name}} is not running.": "",
|
||||
"reload cached images.": "重新加载缓存的镜像",
|
||||
"reloads images previously added using the 'cache add' subcommand": "重新加载之前通过子命令 'cache add' 添加的镜像",
|
||||
"retrieving node": "",
|
||||
"saving node": "",
|
||||
"service {{.namespace_name}}/{{.service_name}} has no node port": "",
|
||||
"setting up certs": "",
|
||||
"stat failed": "",
|
||||
"status json failure": "",
|
||||
"status text failure": "",
|
||||
|
|
@ -687,16 +725,17 @@
|
|||
"usage: minikube delete": "",
|
||||
"usage: minikube profile [MINIKUBE_PROFILE_NAME]": "",
|
||||
"zsh completion failed": "",
|
||||
"{{.cluster}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.cluster}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "",
|
||||
"{{.driver}} does not appear to be installed": "似乎并未安装 {{.driver}}",
|
||||
"{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "似乎并未安装 {{.driver}},但已被当前的配置文件指定。请执行 'minikube delete' 或者安装 {{.driver}}",
|
||||
"{{.extra_option_component_name}}.{{.key}}={{.value}}": "",
|
||||
"{{.machine}} IP has been updated to point at {{.ip}}": "",
|
||||
"{{.machine}} IP was already correctly configured for {{.ip}}": "",
|
||||
"{{.name}} cluster does not exist": "",
|
||||
"{{.name}} has no available configuration options": "",
|
||||
"{{.name}} is already running": "",
|
||||
"{{.name}} was successfully configured": "",
|
||||
"{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "{{.path}} 的版本是 {{.client_version}},且与 Kubernetes {{.cluster_version}} 不兼容。您需要更新 {{.path}} 或者使用 'minikube kubectl' 连接到这个集群",
|
||||
"{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "",
|
||||
"{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上的 {{.prefix}}minikube {{.version}}",
|
||||
"{{.type}} is not yet a supported filesystem. We will try anyways!": "",
|
||||
"{{.url}} is not accessible: {{.error}}": ""
|
||||
|
|
|
|||
Loading…
Reference in New Issue