pull/12265/head
Predrag Rogic 2021-08-24 04:20:52 +01:00
commit 6efbd1ce13
GPG Key ID: F1FF5748C4855229
64 changed files with 5136 additions and 1143 deletions

View File

@@ -20,7 +20,10 @@ jobs:
id: gendocs
run: |
make generate-docs
echo "::set-output name=changes::$(git status --porcelain)"
c=$(git status --porcelain)
c="${c//$'\n'/'%0A'}"
c="${c//$'\r'/'%0D'}"
echo "::set-output name=changes::$c"
- name: Create PR
if: ${{ steps.gendocs.outputs.changes != '' }}
uses: peter-evans/create-pull-request@v3
@@ -37,6 +40,7 @@ jobs:
body: |
Committing changes resulting from `make generate-docs`.
This PR is auto-generated by the [gendocs](https://github.com/kubernetes/minikube/blob/master/.github/workflows/docs.yml) CI workflow.
```
${{ steps.gendocs.outputs.changes }}
```
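
The escaping above (repeated in the leaderboard and update-kubernetes-version workflows below) exists because the GitHub Actions `set-output` command only reads a single line, so the newlines and carriage returns in `git status --porcelain` output must be percent-encoded as `%0A` and `%0D`. A minimal Go sketch of the same rule, using a hypothetical `escapeOutput` helper:

```go
package main

import (
	"fmt"
	"strings"
)

// escapeOutput mirrors the escaping the workflow step performs on the
// multiline `git status --porcelain` output before handing it to set-output.
func escapeOutput(s string) string {
	s = strings.ReplaceAll(s, "\n", "%0A") // LF
	s = strings.ReplaceAll(s, "\r", "%0D") // CR
	return s
}

func main() {
	status := " M go.mod\n M go.sum\n"
	fmt.Printf("::set-output name=changes::%s\n", escapeOutput(status))
}
```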

View File

@@ -21,7 +21,10 @@ jobs:
id: leaderboard
run: |
make update-leaderboard
echo "::set-output name=changes::$(git status --porcelain)"
c=$(git status --porcelain)
c="${c//$'\n'/'%0A'}"
c="${c//$'\r'/'%0D'}"
echo "::set-output name=changes::$c"
env:
GITHUB_TOKEN: ${{ secrets.MINIKUBE_BOT_PAT }}
- name: Create PR
@@ -40,6 +43,7 @@ jobs:
body: |
Committing changes resulting from `make update-leaderboard`.
This PR is auto-generated by the [update-leaderboard](https://github.com/kubernetes/minikube/blob/master/.github/workflows/leaderboard.yml) CI workflow.
```
${{ steps.leaderboard.outputs.changes }}
```

View File

@@ -20,7 +20,10 @@ jobs:
id: bumpk8s
run: |
make update-kubernetes-version
echo "::set-output name=changes::$(git status --porcelain)"
c=$(git status --porcelain)
c="${c//$'\n'/'%0A'}"
c="${c//$'\r'/'%0D'}"
echo "::set-output name=changes::$c"
- name: Create PR
if: ${{ steps.bumpk8s.outputs.changes != '' }}
uses: peter-evans/create-pull-request@v3
@@ -39,5 +42,7 @@ jobs:
This PR was auto-generated by `make update-kubernetes-version` using the [update-k8s-versions.yml](https://github.com/kubernetes/minikube/tree/master/.github/workflows) CI workflow.
Please only merge if all the tests pass.
```
${{ steps.bumpk8s.outputs.changes }}
```

View File

@@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v1.22.0-1628622362-12032
ISO_VERSION ?= v1.22.0-1628974786-12268
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
DEB_REVISION ?= 0
@@ -40,7 +40,7 @@ KVM_GO_VERSION ?= $(GO_VERSION:.0=)
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
BUILDROOT_BRANCH ?= 2020.02.12
BUILDROOT_BRANCH ?= 2021.02.4
REGISTRY ?= gcr.io/k8s-minikube
# Get git commit id
@@ -66,10 +66,10 @@ MINIKUBE_BUCKET ?= minikube/releases
MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download
KERNEL_VERSION ?= 4.19.182
KERNEL_VERSION ?= 4.19.202
# latest from https://github.com/golangci/golangci-lint/releases
# update this only by running `make update-golint-version`
GOLINT_VERSION ?= v1.41.1
GOLINT_VERSION ?= v1.42.0
# Limit number of default jobs, to avoid the CI builds running out of memory
GOLINT_JOBS ?= 4
# see https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
@@ -282,8 +282,6 @@ minikube_iso: deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/usr/b
git clone --depth=1 --branch=$(BUILDROOT_BRANCH) https://github.com/buildroot/buildroot $(BUILD_DIR)/buildroot; \
fi;
$(MAKE) BR2_EXTERNAL=../../deploy/iso/minikube-iso minikube_defconfig -C $(BUILD_DIR)/buildroot
mkdir -p $(BUILD_DIR)/buildroot/output/build
echo "module buildroot.org/go" > $(BUILD_DIR)/buildroot/output/build/go.mod
$(MAKE) -C $(BUILD_DIR)/buildroot host-python
$(MAKE) -C $(BUILD_DIR)/buildroot
mv $(BUILD_DIR)/buildroot/output/images/rootfs.iso9660 $(BUILD_DIR)/minikube.iso

View File

@@ -20,6 +20,7 @@ limitations under the License.
package cmd
import (
"encoding/json"
"fmt"
"io"
"net"
@@ -33,6 +34,7 @@ import (
apiWait "k8s.io/apimachinery/pkg/util/wait"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"k8s.io/klog/v2"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -384,12 +386,94 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
dockerSetEnvTmpl = dockerEnvTCPTmpl
}
envVars := dockerEnvVars(ec)
if ec.Shell == "none" {
switch outputFormat {
case "":
// shell "none"
break
case "text":
for k, v := range envVars {
_, err := fmt.Fprintf(w, "%s=%s\n", k, v)
if err != nil {
return err
}
}
return nil
case "json":
json, err := json.Marshal(envVars)
if err != nil {
return err
}
_, err = w.Write(json)
if err != nil {
return err
}
_, err = w.Write([]byte{'\n'})
if err != nil {
return err
}
return nil
case "yaml":
yaml, err := yaml.Marshal(envVars)
if err != nil {
return err
}
_, err = w.Write(yaml)
if err != nil {
return err
}
return nil
default:
exit.Message(reason.InternalOutputUsage, "error: --output must be 'text', 'yaml' or 'json'")
}
}
return shell.SetScript(ec.EnvConfig, w, dockerSetEnvTmpl, dockerShellCfgSet(ec, envVars))
}
// dockerUnsetScript writes out a shell-compatible 'docker-env unset' script
func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
vars := dockerEnvNames(ec)
if ec.Shell == "none" {
switch outputFormat {
case "":
// shell "none"
break
case "text":
for _, n := range vars {
_, err := fmt.Fprintf(w, "%s\n", n)
if err != nil {
return err
}
}
return nil
case "json":
json, err := json.Marshal(vars)
if err != nil {
return err
}
_, err = w.Write(json)
if err != nil {
return err
}
_, err = w.Write([]byte{'\n'})
if err != nil {
return err
}
return nil
case "yaml":
yaml, err := yaml.Marshal(vars)
if err != nil {
return err
}
_, err = w.Write(yaml)
if err != nil {
return err
}
return nil
default:
exit.Message(reason.InternalOutputUsage, "error: --output must be 'text', 'yaml' or 'json'")
}
}
return shell.UnsetScript(ec.EnvConfig, w, vars)
}
@@ -508,5 +592,6 @@ func init() {
dockerEnvCmd.Flags().BoolVar(&sshHost, "ssh-host", false, "Use SSH connection instead of HTTPS (port 2376)")
dockerEnvCmd.Flags().BoolVar(&sshAdd, "ssh-add", false, "Add SSH identity key to SSH authentication agent")
dockerEnvCmd.Flags().StringVar(&shell.ForceShell, "shell", "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect")
dockerEnvCmd.Flags().StringVarP(&outputFormat, "output", "o", "", "One of 'text', 'yaml' or 'json'.")
dockerEnvCmd.Flags().BoolVarP(&dockerUnset, "unset", "u", false, "Unset variables instead of setting them")
}
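
With the new `--output` flag, `minikube docker-env --shell none` can emit the variables as plain text, JSON, or YAML instead of a shell script. A minimal sketch of consuming the JSON form, assuming the map-shaped payload produced by `json.Marshal(envVars)` above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Assumes a running minikube cluster with the docker-env command available.
	out, err := exec.Command("minikube", "docker-env", "--shell", "none", "-o", "json").Output()
	if err != nil {
		log.Fatal(err)
	}
	env := map[string]string{}
	if err := json.Unmarshal(out, &env); err != nil {
		log.Fatal(err)
	}
	for k, v := range env {
		fmt.Printf("%s=%s\n", k, v) // e.g. DOCKER_HOST=tcp://127.0.0.1:32842
	}
}
```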

View File

@@ -18,10 +18,14 @@ package cmd
import (
"bytes"
"encoding/json"
"os"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gopkg.in/yaml.v2"
)
type FakeNoProxyGetter struct {
@@ -36,13 +40,16 @@ func (f FakeNoProxyGetter) GetNoProxyVar() (string, string) {
func TestGenerateDockerScripts(t *testing.T) {
var tests = []struct {
shell string
output string
config DockerEnvConfig
noProxyGetter *FakeNoProxyGetter
wantSet string
wantUnset string
diffOpts []cmp.Option
}{
{
"bash",
"",
DockerEnvConfig{profile: "dockerdriver", driver: "docker", hostIP: "127.0.0.1", port: 32842, certsDir: "/certs"},
nil,
`export DOCKER_TLS_VERIFY="1"
@@ -58,9 +65,11 @@ unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "dockerdriver", driver: "docker", ssh: true, username: "root", hostname: "host", sshport: 22},
nil,
`export DOCKER_HOST="ssh://root@host:22"
@@ -74,9 +83,11 @@ unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "bash", driver: "kvm2", hostIP: "127.0.0.1", port: 2376, certsDir: "/certs"},
nil,
`export DOCKER_TLS_VERIFY="1"
@@ -92,9 +103,11 @@ unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "ipv6", driver: "kvm2", hostIP: "fe80::215:5dff:fe00:a903", port: 2376, certsDir: "/certs"},
nil,
`export DOCKER_TLS_VERIFY="1"
@@ -110,9 +123,11 @@ unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
nil,
},
{
"fish",
"",
DockerEnvConfig{profile: "fish", driver: "kvm2", hostIP: "127.0.0.1", port: 2376, certsDir: "/certs"},
nil,
`set -gx DOCKER_TLS_VERIFY "1";
@@ -128,9 +143,11 @@ set -e DOCKER_HOST;
set -e DOCKER_CERT_PATH;
set -e MINIKUBE_ACTIVE_DOCKERD;
`,
nil,
},
{
"powershell",
"",
DockerEnvConfig{profile: "powershell", driver: "hyperv", hostIP: "192.168.0.1", port: 2376, certsDir: "/certs"},
nil,
`$Env:DOCKER_TLS_VERIFY = "1"
@@ -146,9 +163,11 @@ Remove-Item Env:\\DOCKER_HOST
Remove-Item Env:\\DOCKER_CERT_PATH
Remove-Item Env:\\MINIKUBE_ACTIVE_DOCKERD
`,
nil,
},
{
"cmd",
"",
DockerEnvConfig{profile: "cmd", driver: "hyperv", hostIP: "192.168.0.1", port: 2376, certsDir: "/certs"},
nil,
`SET DOCKER_TLS_VERIFY=1
@@ -164,9 +183,11 @@ SET DOCKER_HOST=
SET DOCKER_CERT_PATH=
SET MINIKUBE_ACTIVE_DOCKERD=
`,
nil,
},
{
"emacs",
"",
DockerEnvConfig{profile: "emacs", driver: "hyperv", hostIP: "192.168.0.1", port: 2376, certsDir: "/certs"},
nil,
`(setenv "DOCKER_TLS_VERIFY" "1")
@@ -181,9 +202,11 @@ SET MINIKUBE_ACTIVE_DOCKERD=
(setenv "DOCKER_CERT_PATH" nil)
(setenv "MINIKUBE_ACTIVE_DOCKERD" nil)
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "bash-no-proxy", driver: "kvm2", hostIP: "127.0.0.1", port: 2376, certsDir: "/certs", noProxy: true},
&FakeNoProxyGetter{"NO_PROXY", "127.0.0.1"},
`export DOCKER_TLS_VERIFY="1"
@@ -202,9 +225,11 @@ unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset NO_PROXY;
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "bash-no-proxy-lower", driver: "kvm2", hostIP: "127.0.0.1", port: 2376, certsDir: "/certs", noProxy: true},
&FakeNoProxyGetter{"no_proxy", "127.0.0.1"},
`export DOCKER_TLS_VERIFY="1"
@@ -223,9 +248,11 @@ unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset no_proxy;
`,
nil,
},
{
"powershell",
"",
DockerEnvConfig{profile: "powershell-no-proxy-idempotent", driver: "hyperv", hostIP: "192.168.0.1", port: 2376, certsDir: "/certs", noProxy: true},
&FakeNoProxyGetter{"no_proxy", "192.168.0.1"},
`$Env:DOCKER_TLS_VERIFY = "1"
@@ -243,9 +270,11 @@ Remove-Item Env:\\DOCKER_CERT_PATH
Remove-Item Env:\\MINIKUBE_ACTIVE_DOCKERD
Remove-Item Env:\\no_proxy
`,
nil,
},
{
"bash",
"",
DockerEnvConfig{profile: "sh-no-proxy-add", driver: "kvm2", hostIP: "127.0.0.1", port: 2376, certsDir: "/certs", noProxy: true},
&FakeNoProxyGetter{"NO_PROXY", "192.168.0.1,10.0.0.4"},
`export DOCKER_TLS_VERIFY="1"
@@ -264,9 +293,11 @@ unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset NO_PROXY;
`,
nil,
},
{
"none",
"",
DockerEnvConfig{profile: "noneshell", driver: "docker", hostIP: "127.0.0.1", port: 32842, certsDir: "/certs"},
nil,
`DOCKER_TLS_VERIFY=1
@@ -279,11 +310,91 @@ DOCKER_HOST
DOCKER_CERT_PATH
MINIKUBE_ACTIVE_DOCKERD
`,
nil,
},
{
"none",
"text",
DockerEnvConfig{profile: "nonetext", driver: "docker", hostIP: "127.0.0.1", port: 32842, certsDir: "/certs"},
nil,
`DOCKER_TLS_VERIFY=1
DOCKER_HOST=tcp://127.0.0.1:32842
DOCKER_CERT_PATH=/certs
MINIKUBE_ACTIVE_DOCKERD=nonetext
`,
`DOCKER_TLS_VERIFY
DOCKER_HOST
DOCKER_CERT_PATH
MINIKUBE_ACTIVE_DOCKERD
`,
[]cmp.Option{
cmpopts.AcyclicTransformer("SplitLines", func(s string) []string {
return strings.Split(s, "\n")
}),
cmpopts.SortSlices(func(a, b string) bool {
return a < b
}),
},
},
{
"none",
"json",
DockerEnvConfig{profile: "nonejson", driver: "docker", hostIP: "127.0.0.1", port: 32842, certsDir: "/certs"},
nil,
`{
"DOCKER_TLS_VERIFY": "1",
"DOCKER_HOST": "tcp://127.0.0.1:32842",
"DOCKER_CERT_PATH": "/certs",
"MINIKUBE_ACTIVE_DOCKERD": "nonejson"
}`,
`[
"DOCKER_TLS_VERIFY",
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"MINIKUBE_ACTIVE_DOCKERD"
]`,
[]cmp.Option{
cmp.FilterValues(func(x, y string) bool {
return json.Valid([]byte(x)) && json.Valid([]byte(y))
},
cmp.Transformer("ParseJSON", func(in string) (out interface{}) {
if err := json.Unmarshal([]byte(in), &out); err != nil {
panic(err) // should never occur given previous filter to ensure valid JSON
}
return out
})),
},
},
{
"none",
"yaml",
DockerEnvConfig{profile: "noneyaml", driver: "docker", hostIP: "127.0.0.1", port: 32842, certsDir: "/certs"},
nil,
`DOCKER_TLS_VERIFY: "1"
DOCKER_HOST: tcp://127.0.0.1:32842
DOCKER_CERT_PATH: /certs
MINIKUBE_ACTIVE_DOCKERD: noneyaml
`,
`- DOCKER_TLS_VERIFY
- DOCKER_HOST
- DOCKER_CERT_PATH
- MINIKUBE_ACTIVE_DOCKERD
`,
[]cmp.Option{
cmpopts.AcyclicTransformer("ParseYAML", func(in string) (out interface{}) {
if err := yaml.Unmarshal([]byte(in), &out); err != nil {
return nil
}
return out
}),
},
},
}
for _, tc := range tests {
t.Run(tc.config.profile, func(t *testing.T) {
tc.config.EnvConfig.Shell = tc.shell
// set global variable
outputFormat = tc.output
defaultNoProxyGetter = tc.noProxyGetter
var b []byte
buf := bytes.NewBuffer(b)
@@ -291,7 +402,7 @@ MINIKUBE_ACTIVE_DOCKERD
t.Errorf("setScript(%+v) error: %v", tc.config, err)
}
got := buf.String()
if diff := cmp.Diff(tc.wantSet, got); diff != "" {
if diff := cmp.Diff(tc.wantSet, got, tc.diffOpts...); diff != "" {
t.Errorf("setScript(%+v) mismatch (-want +got):\n%s\n\nraw output:\n%s\nquoted: %q", tc.config, diff, got, got)
}
@@ -300,7 +411,7 @@ MINIKUBE_ACTIVE_DOCKERD
t.Errorf("unsetScript(%+v) error: %v", tc.config, err)
}
got = buf.String()
if diff := cmp.Diff(tc.wantUnset, got); diff != "" {
if diff := cmp.Diff(tc.wantUnset, got, tc.diffOpts...); diff != "" {
t.Errorf("unsetScript(%+v) mismatch (-want +got):\n%s\n\nraw output:\n%s\nquoted: %q", tc.config, diff, got, got)
}
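
The JSON and YAML cases above cannot be compared byte-for-byte because map key order is not stable across marshals, so the tests parse both sides before diffing via `diffOpts`. A standalone sketch of the go-cmp pattern used here:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Only compare strings as parsed JSON when both sides are valid JSON.
	parseJSON := cmp.FilterValues(
		func(x, y string) bool { return json.Valid([]byte(x)) && json.Valid([]byte(y)) },
		cmp.Transformer("ParseJSON", func(in string) (out interface{}) {
			if err := json.Unmarshal([]byte(in), &out); err != nil {
				panic(err) // unreachable: the filter guarantees valid JSON
			}
			return out
		}))

	a := `{"DOCKER_HOST":"tcp://127.0.0.1:32842","DOCKER_TLS_VERIFY":"1"}`
	b := `{"DOCKER_TLS_VERIFY":"1","DOCKER_HOST":"tcp://127.0.0.1:32842"}`
	fmt.Println(cmp.Diff(a, b, parseJSON) == "") // true: equal despite key order
}
```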

View File

@@ -356,6 +356,24 @@ $ minikube image tag source target
},
}
var pushImageCmd = &cobra.Command{
Use: "push",
Short: "Push images",
Example: `
$ minikube image push busybox
`,
Run: func(cmd *cobra.Command, args []string) {
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
exit.Error(reason.Usage, "loading profile", err)
}
if err := machine.PushImages(args, profile); err != nil {
exit.Error(reason.GuestImagePush, "Failed to push images", err)
}
},
}
func init() {
loadImageCmd.Flags().BoolVarP(&pull, "pull", "", false, "Pull the remote image (no caching)")
loadImageCmd.Flags().BoolVar(&imgDaemon, "daemon", false, "Cache image from docker daemon")
@@ -375,4 +393,5 @@ func init() {
imageCmd.AddCommand(saveImageCmd)
imageCmd.AddCommand(listImageCmd)
imageCmd.AddCommand(tagImageCmd)
imageCmd.AddCommand(pushImageCmd)
}

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
@@ -480,6 +481,17 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
cc.ContainerVolumeMounts = []string{viper.GetString(mountString)}
}
if detect.IsCloudShell() {
err := cc.KubernetesConfig.ExtraOptions.Set("kubelet.cgroups-per-qos=false")
if err != nil {
exit.Error(reason.InternalConfigSet, "failed to set cloud shell kubelet config options", err)
}
err = cc.KubernetesConfig.ExtraOptions.Set("kubelet.enforce-node-allocatable=\"\"")
if err != nil {
exit.Error(reason.InternalConfigSet, "failed to set cloud shell kubelet config options", err)
}
}
return cc
}

View File

@@ -86,7 +86,7 @@ var tunnelCmd = &cobra.Command{
sshPort := strconv.Itoa(port)
sshKey := filepath.Join(localpath.MiniPath(), "machines", cname, "id_rsa")
kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1())
kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1(), clientset.NetworkingV1())
err = kicSSHTunnel.Start()
if err != nil {
exit.Error(reason.SvcTunnelStart, "error starting tunnel", err)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ambassadorinstallations.getambassador.io

View File

@@ -24,7 +24,7 @@ metadata:
app.kubernetes.io/part-of: kube-system
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: minikube-ingress-dns
@@ -47,7 +47,7 @@ rules:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: minikube-ingress-dns

View File

@@ -8,7 +8,7 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
...
---
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: istiooperators.install.istio.io

View File

@@ -0,0 +1,78 @@
From 2b512af2ddaae01926fdcc9056b71017cac2a8d2 Mon Sep 17 00:00:00 2001
From: Tamir Duberstein <tamird@google.com>
Date: Thu, 25 Feb 2021 16:44:46 -0500
Subject: [PATCH] dist: generate stub go.mod in workdir
(cherry picked from commit c6374f516206c02b905d0d76ee1a66dab6fcd212)
---
src/cmd/dist/build.go | 26 ++++++--------------------
1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 9e2b4f33b8..e5a7f9e9c4 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -110,9 +110,6 @@ func xinit() {
fatalf("$GOROOT must be set")
}
goroot = filepath.Clean(b)
- if modRoot := findModuleRoot(goroot); modRoot != "" {
- fatalf("found go.mod file in %s: $GOROOT must not be inside a module", modRoot)
- }
b = os.Getenv("GOROOT_FINAL")
if b == "" {
@@ -244,6 +241,9 @@ func xinit() {
os.Setenv("LANGUAGE", "en_US.UTF8")
workdir = xworkdir()
+ if err := ioutil.WriteFile(pathf("%s/go.mod", workdir), []byte("module bootstrap"), 0666); err != nil {
+ fatalf("cannot write stub go.mod: %s", err)
+ }
xatexit(rmworkdir)
tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch)
@@ -1484,11 +1484,11 @@ func goCmd(goBinary string, cmd string, args ...string) {
goCmd = append(goCmd, "-p=1")
}
- run(goroot, ShowOutput|CheckExit, append(goCmd, args...)...)
+ run(workdir, ShowOutput|CheckExit, append(goCmd, args...)...)
}
func checkNotStale(goBinary string, targets ...string) {
- out := run(goroot, CheckExit,
+ out := run(workdir, CheckExit,
append([]string{
goBinary,
"list", "-gcflags=all=" + gogcflags, "-ldflags=all=" + goldflags,
@@ -1498,7 +1498,7 @@ func checkNotStale(goBinary string, targets ...string) {
os.Setenv("GODEBUG", "gocachehash=1")
for _, target := range []string{"runtime/internal/sys", "cmd/dist", "cmd/link"} {
if strings.Contains(out, "STALE "+target) {
- run(goroot, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target)
+ run(workdir, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target)
break
}
}
@@ -1590,20 +1590,6 @@ func checkCC() {
}
}
-func findModuleRoot(dir string) (root string) {
- for {
- if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
- return dir
- }
- d := filepath.Dir(dir)
- if d == dir {
- break
- }
- dir = d
- }
- return ""
-}
-
func defaulttarg() string {
// xgetwd might return a path with symlinks fully resolved, and if
// there happens to be symlinks in goroot, then the hasprefix test

View File

@@ -0,0 +1,2 @@
net.ipv4.conf.lxc*.rp_filter = 0
net.ipv4.conf.cilium_*.rp_filter = 0

View File

@@ -18,13 +18,12 @@ BR2_ROOTFS_USERS_TABLES="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/use
BR2_ROOTFS_OVERLAY="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/rootfs-overlay"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.182"
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.202"
BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y
BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/minikube/linux_defconfig"
BR2_LINUX_KERNEL_LZ4=y
BR2_LINUX_KERNEL_NEEDS_HOST_LIBELF=y
BR2_PACKAGE_GZIP=y
BR2_PACKAGE_LZ4=y
BR2_PACKAGE_XZ=y
BR2_PACKAGE_STRACE=y
BR2_PACKAGE_SYSDIG=y
@@ -37,6 +36,9 @@ BR2_PACKAGE_SSHFS=y
BR2_PACKAGE_XFSPROGS=y
BR2_PACKAGE_PARTED=y
BR2_PACKAGE_SYSSTAT=y
BR2_PACKAGE_LUAJIT=y
BR2_PACKAGE_LZ4=y
BR2_PACKAGE_LZ4_PROGS=y
BR2_PACKAGE_CA_CERTIFICATES=y
BR2_PACKAGE_LIBOPENSSL_BIN=y
BR2_PACKAGE_LIBCURL_CURL=y
@@ -58,7 +60,9 @@ BR2_PACKAGE_PSMISC=y
BR2_PACKAGE_SYSTEMD_LOGIND=y
BR2_PACKAGE_SYSTEMD_MACHINED=y
BR2_PACKAGE_TAR=y
BR2_PACKAGE_UTIL_LINUX_BINARIES=y
BR2_PACKAGE_UTIL_LINUX_LOSETUP=y
BR2_PACKAGE_UTIL_LINUX_NOLOGIN=y
BR2_PACKAGE_UTIL_LINUX_NSENTER=y
BR2_PACKAGE_UTIL_LINUX_SCHEDUTILS=y
BR2_TARGET_ROOTFS_CPIO_GZIP=y

go.mod
View File

@@ -16,7 +16,7 @@ require (
github.com/c4milo/gotoolkit v0.0.0-20190525173301-67483a18c17a // indirect
github.com/cenkalti/backoff/v4 v4.1.1
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cloudevents/sdk-go/v2 v2.3.1
github.com/cloudevents/sdk-go/v2 v2.5.0
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
github.com/docker/docker v20.10.7+incompatible
@@ -32,6 +32,7 @@ require (
github.com/gookit/color v1.4.2 // indirect
github.com/hashicorp/go-getter v1.5.7
github.com/hashicorp/go-retryablehttp v0.7.0
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8
@@ -80,7 +81,7 @@ require (
golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/exp v0.0.0-20210220032938-85be41e4509f
golang.org/x/mod v0.4.2
golang.org/x/mod v0.5.0
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c

go.sum
View File

@@ -200,8 +200,8 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/v2 v2.3.1 h1:QRTu0yRA4FbznjRSds0/4Hy6cVYpWV2wInlNJSHWAtw=
github.com/cloudevents/sdk-go/v2 v2.3.1/go.mod h1:4fO2UjPMYYR1/7KPJQCwTPb0lFA8zYuitkUpAZFSY1Q=
github.com/cloudevents/sdk-go/v2 v2.5.0 h1:Ts6aLHbBUJfcNcZ4ouAfJ4+Np7SE1Yf2w4ADKRCd7Fo=
github.com/cloudevents/sdk-go/v2 v2.5.0/go.mod h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U=
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:Yg2hDs4b13Evkpj42FU2idX2cVXVFqQSheXYKM86Qsk=
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:MgJyK38wkzZbiZSKeIeFankxxSA8gayko/nr5x5bgBA=
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:tuijfIjZyjZaHq9xDUh0tNitwXshJpbLkqMOJv4H3do=
@@ -755,8 +755,6 @@ github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wn
github.com/libvirt/libvirt-go v3.9.0+incompatible h1:tcJOV5bCR8lWsifKnPCEnYSroD5rjuUkCBp/kv1kH/w=
github.com/libvirt/libvirt-go v3.9.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6iGRJe3hvV/yVXrd41yVEjxuFHxasJqDhkIjS4gk=
github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04=
@@ -877,7 +875,6 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
@@ -1256,8 +1253,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

View File

@@ -30,7 +30,7 @@ gcloud cloud-shell ssh --authorize-session << EOF
DRIVER="docker"
JOB_NAME="Docker_Cloud_Shell"
CONTAINER_RUNTIME="docker"
EXTRA_TEST_ARGS="-test.run (TestFunctional|TestAddons)"
EXTRA_TEST_ARGS="-test.run (TestFunctional|TestAddons|TestStartStop)"
# Need to set these in cloud-shell or they will not be present in common.sh
MINIKUBE_LOCATION=$MINIKUBE_LOCATION

View File

@@ -158,6 +158,14 @@ async function loadTestData() {
return [testData, responseDate];
}
Array.prototype.min = function() {
return this.reduce((acc, val) => Math.min(acc, val), Number.MAX_VALUE)
}
Array.prototype.max = function() {
return this.reduce((acc, val) => Math.max(acc, val), -Number.MAX_VALUE)
}
Array.prototype.sum = function() {
return this.reduce((sum, value) => sum + value, 0);
};
@@ -256,14 +264,14 @@ function displayTestAndEnvironmentChart(testData, testName, environmentName) {
groupData.date,
groupData.flakeRate,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${groupData.date.toString()}</b><br>
<b>Date:</b> ${groupData.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Flake Percentage:</b> ${groupData.flakeRate.toFixed(2)}%<br>
<b>Jobs:</b><br>
${groupData.jobs.map(({ id, status }) => ` - <a href="${testGopoghLink(id, environmentName, testName)}">${id}</a> (${status})`).join("<br>")}
</div>`,
groupData.duration,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${groupData.date.toString()}</b><br>
<b>Date:</b> ${groupData.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Average Duration:</b> ${groupData.duration.toFixed(2)}s<br>
<b>Jobs:</b><br>
${groupData.jobs.map(({ id, duration }) => ` - <a href="${testGopoghLink(id, environmentName, testName)}">${id}</a> (${duration}s)`).join("<br>")}
@@ -297,8 +305,8 @@ }
}
{
const dates = testRuns.map(run => run.date.getTime());
const startDate = new Date(Math.min(...dates));
const endDate = new Date(Math.max(...dates));
const startDate = new Date(dates.min());
const endDate = new Date(dates.max());
const weekDates = [];
let currentDate = startDate;
@@ -332,14 +340,14 @@ function displayTestAndEnvironmentChart(testData, testName, environmentName) {
groupData.date,
groupData.flakeRate,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${groupData.date.toString()}</b><br>
<b>Date:</b> ${groupData.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Flake Percentage:</b> ${groupData.flakeRate.toFixed(2)}%<br>
<b>Jobs:</b><br>
${groupData.jobs.map(({ id, status }) => ` - <a href="${testGopoghLink(id, environmentName, testName)}">${id}</a> (${status})`).join("<br>")}
</div>`,
groupData.duration,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${groupData.date.toString()}</b><br>
<b>Date:</b> ${groupData.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Average Duration:</b> ${groupData.duration.toFixed(2)}s<br>
<b>Jobs:</b><br>
${groupData.jobs.map(({ id, duration }) => ` - <a href="${testGopoghLink(id, environmentName, testName)}">${id}</a> (${duration}s)`).join("<br>")}
@@ -474,7 +482,7 @@ function displayEnvironmentChart(testData, environmentName) {
data.flakeRate,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b style="display: block">${name}</b><br>
<b>${data.date.toString()}</b><br>
<b>Date:</b> ${data.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Flake Percentage:</b> ${data.flakeRate.toFixed(2)}%<br>
<b>Jobs:</b><br>
${data.jobs.map(({ id, status }) => ` - <a href="${testGopoghLink(id, environmentName, name)}">${id}</a> (${status})`).join("<br>")}
@@ -502,8 +510,8 @@ }
}
{
const dates = testData.map(run => run.date.getTime());
const startDate = new Date(Math.min(...dates));
const endDate = new Date(Math.max(...dates));
const startDate = new Date(dates.min());
const endDate = new Date(dates.max());
const weekDates = [];
let currentDate = startDate;
@@ -551,7 +559,7 @@ function displayEnvironmentChart(testData, environmentName) {
data.flakeRate,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b style="display: block">${name}</b><br>
<b>${data.date.toString()}</b><br>
<b>Date:</b> ${data.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Flake Percentage:</b> ${data.flakeRate.toFixed(2)}%<br>
<b>Jobs:</b><br>
${data.jobs.map(({ id, status }) => ` - <a href="${testGopoghLink(id, environmentName, name)}">${id}</a> (${status})`).join("<br>")}
@@ -611,14 +619,14 @@ function displayEnvironmentChart(testData, environmentName) {
dateInfo.date,
dateInfo.testCount,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${dateInfo.date.toString()}</b><br>
<b>Date:</b> ${dateInfo.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Test Count (averaged): </b> ${+dateInfo.testCount.toFixed(2)}<br>
<b>Jobs:</b><br>
${dateInfo.runInfo.map(job => ` - <a href="${testGopoghLink(job.rootJob, environmentName)}">${job.rootJob}</a> Test count: ${job.testCount}`).join("<br>")}
</div>`,
dateInfo.totalDuration,
`<div style="padding: 1rem; font-family: 'Arial'; font-size: 14">
<b>${dateInfo.date.toString()}</b><br>
<b>Date:</b> ${dateInfo.date.toLocaleString([], {dateStyle: 'medium'})}<br>
<b>Total Duration (averaged): </b> ${+dateInfo.totalDuration.toFixed(2)}<br>
<b>Jobs:</b><br>
${dateInfo.runInfo.map(job => ` - <a href="${testGopoghLink(job.rootJob, environmentName)}">${job.rootJob}</a> Total Duration: ${+job.totalDuration.toFixed(2)}s`).join("<br>")}

View File

@@ -153,16 +153,8 @@ func EnableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err
// to match both ingress and ingress-dns addons
if strings.HasPrefix(name, "ingress") && enable {
if driver.IsKIC(cc.Driver) {
if runtime.GOOS == "windows" {
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
out.Styled(style.Tip, `After the addon is enabled, please run "minikube tunnel" and your ingress resources will be available at "127.0.0.1"`)
} else if runtime.GOOS != "linux" {
exit.Message(reason.Usage, `Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.
Alternatively to use this addon you can use a vm-based driver:
'minikube start --vm=true'
To track the update on this work in progress feature please check:
https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Driver, "os_name": runtime.GOOS, "addon_name": name})
} else if driver.BareMetal(cc.Driver) {
out.WarningT(`Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.`,
out.V{"driver_name": cc.Driver, "addon_name": name})

View File

@@ -31,8 +31,11 @@ func Pause(v semver.Version, mirror string) string {
// Note: changing this logic requires bumping the preload version
// Should match `PauseVersion` in:
// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go
pv := "3.4.1"
pv := "3.5"
// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants_unix.go
if semver.MustParseRange("<1.22.0-alpha.3")(v) {
pv = "3.4.1"
}
if semver.MustParseRange("<1.21.0-alpha.3")(v) {
pv = "3.2"
}
@@ -71,8 +74,10 @@ func coreDNS(v semver.Version, mirror string) string {
if semver.MustParseRange("<1.21.0-alpha.1")(v) {
in = "coredns"
}
cv := "v1.8.0"
cv := "v1.8.4"
switch v.Minor {
case 21:
cv = "v1.8.0"
case 20, 19:
cv = "1.7.0"
case 18:
@@ -96,7 +101,7 @@ func etcd(v semver.Version, mirror string) string {
// Note: changing this logic requires bumping the preload version
// Should match `DefaultEtcdVersion` in:
// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go
ev := "3.4.13-3"
ev := "3.5.0-0"
switch v.Minor {
case 19, 20, 21:
@@ -163,18 +168,34 @@ func KindNet(repo string) string {
return path.Join(repo, "kindnetd:v20210326-1e038dc5")
}
// all calico images are from https://docs.projectcalico.org/manifests/calico.yaml
const calicoVersion = "v3.20.0"
const calicoRepo = "docker.io/calico"
// CalicoDaemonSet returns the image used for calicoDaemonSet
func CalicoDaemonSet(repo string) string {
if repo == "" {
repo = "calico"
}
return path.Join(repo, "node:v3.14.1")
return calicoCommon(repo, "node")
}
// CalicoDeployment returns the image used for calicoDeployment
func CalicoDeployment(repo string) string {
if repo == "" {
repo = "calico"
}
return path.Join(repo, "kube-controllers:v3.14.1")
return calicoCommon(repo, "kube-controllers")
}
// CalicoFelixDriver returns image used for felix driver
func CalicoFelixDriver(repo string) string {
return calicoCommon(repo, "pod2daemon-flexvol")
}
// CalicoBin returns image used for calico binary image
func CalicoBin(repo string) string {
return calicoCommon(repo, "cni")
}
func calicoCommon(repo string, name string) string {
if repo == "" {
repo = calicoRepo
}
return path.Join(repo, fmt.Sprintf("%s:%s", name, calicoVersion))
}

View File

@@ -65,6 +65,15 @@ k8s.gcr.io/kube-proxy:v1.21.0
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
`, "\n"), "\n")},
{"v1.22.0", strings.Split(strings.Trim(`
k8s.gcr.io/kube-apiserver:v1.22.0
k8s.gcr.io/kube-controller-manager:v1.22.0
k8s.gcr.io/kube-scheduler:v1.22.0
k8s.gcr.io/kube-proxy:v1.22.0
k8s.gcr.io/pause:3.5
k8s.gcr.io/etcd:3.5.0-0
k8s.gcr.io/coredns/coredns:v1.8.4
`, "\n"), "\n")},
}
for _, tc := range testCases {

View File

@@ -18,6 +18,8 @@ package cni
import (
"bytes"
// blank import required by the go:embed directive below
_ "embed"
"text/template"
"github.com/pkg/errors"
@@ -26,851 +28,12 @@ import (
"k8s.io/minikube/pkg/minikube/config"
)
// https://docs.projectcalico.org/manifests/calico.yaml
//go:embed calico.yaml
var calicoYaml string
// calicoTmpl is from https://docs.projectcalico.org/manifests/calico.yaml
var calicoTmpl = template.Must(template.New("calico").Parse(`---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and the
# tunnels. For IPIP, set to your network MTU - 20; for VXLAN
# set to your network MTU - 50.
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
shortNames:
- gnp
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: kubecontrollersconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: KubeControllersConfiguration
plural: kubecontrollersconfigurations
singular: kubecontrollersconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
- kubecontrollersconfigurations
verbs:
# read its own config
- get
# create a default if none exists
- create
# update status
- update
# watch for changes
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: calico/cni:v3.14.1
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
securityContext:
privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.14.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.14.1
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{ .DaemonSetImageName }}
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within --cluster-cidr
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so kubectl logs works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
- name: IP_AUTODETECTION_METHOD
value: interface=eth.*
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: {{ .DeploymentImageName }}
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml
`))
var calicoTmpl = template.Must(template.New("calico").Parse(calicoYaml))
// Calico is the Calico CNI manager
type Calico struct {
@@ -878,8 +41,10 @@ }
}
type calicoTmplStruct struct {
DeploymentImageName string
DaemonSetImageName string
DeploymentImageName string
DaemonSetImageName string
FelixDriverImageName string
BinaryImageName string
}
// String returns a string representation of this CNI
@@ -890,8 +55,10 @@ func (c Calico) String() string {
// manifest returns a Kubernetes manifest for a CNI
func (c Calico) manifest() (assets.CopyableFile, error) {
input := &calicoTmplStruct{
DeploymentImageName: images.CalicoDeployment(c.cc.KubernetesConfig.ImageRepository),
DaemonSetImageName: images.CalicoDaemonSet(c.cc.KubernetesConfig.ImageRepository),
DeploymentImageName: images.CalicoDeployment(c.cc.KubernetesConfig.ImageRepository),
DaemonSetImageName: images.CalicoDaemonSet(c.cc.KubernetesConfig.ImageRepository),
FelixDriverImageName: images.CalicoFelixDriver(c.cc.KubernetesConfig.ImageRepository),
BinaryImageName: images.CalicoBin(c.cc.KubernetesConfig.ImageRepository),
}
b := bytes.Buffer{}

pkg/minikube/cni/calico.yaml (new file)

File diff suppressed because it is too large.

View File

@@ -76,7 +76,7 @@ spec:
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
@@ -105,7 +105,7 @@ rules:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:

View File

@@ -426,6 +426,15 @@ func (r *Containerd) BuildImage(src string, file string, tag string, push bool,
return nil
}
// PushImage pushes an image
func (r *Containerd) PushImage(name string) error {
klog.Infof("Pushing image %s: %s", name)
c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "push", name)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images push")
}
return nil
}
func (r *Containerd) initBuildkitDaemon() error {
// if daemon is already running, do nothing
cmd := exec.Command("pgrep", "buildkitd")

View File

@@ -134,10 +134,9 @@ func pauseCRIContainers(cr CommandRunner, root string, ids []string) error {
args = append(args, "--root", root)
}
args = append(args, "pause")
cargs := args
for _, id := range ids {
cargs = append(cargs, id)
if _, err := cr.RunCmd(exec.Command("sudo", cargs...)); err != nil {
args := append(args, id)
if _, err := cr.RunCmd(exec.Command("sudo", args...)); err != nil {
return errors.Wrap(err, "runc")
}
}
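
The hunk above fixes a slice-reuse bug: `cargs` was declared once outside the loop, so each iteration appended another container id and every command after the first re-paused containers that were already handled. A small self-contained sketch of the difference, using hypothetical argument values:

```go
package main

import "fmt"

func main() {
	base := []string{"runc", "--root", "/run", "pause"} // hypothetical base command
	ids := []string{"c1", "c2", "c3"}

	// Buggy: the shared slice keeps growing across iterations.
	cargs := base
	for _, id := range ids {
		cargs = append(cargs, id)
		fmt.Println("buggy:", cargs) // final command carries c1, c2 and c3
	}

	// Fixed: rebuild the argument list from the base each iteration.
	for _, id := range ids {
		args := append(base, id)
		fmt.Println("fixed:", args) // each command carries exactly one id
	}
}
```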

View File

@@ -260,6 +260,16 @@ func (r *CRIO) BuildImage(src string, file string, tag string, push bool, env []
return nil
}
// PushImage pushes an image
func (r *CRIO) PushImage(name string) error {
klog.Infof("Pushing image %s", name)
c := exec.Command("sudo", "podman", "push", name)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio push image")
}
return nil
}
// CGroupDriver returns cgroup driver ("cgroupfs" or "systemd")
func (r *CRIO) CGroupDriver() (string, error) {
c := exec.Command("crio", "config")

View File

@@ -105,6 +105,8 @@ type Manager interface {
SaveImage(string, string) error
// Tag an image
TagImage(string, string) error
// Push an image from the runtime to the container registry
PushImage(string) error
// ImageExists takes image name and optionally image sha to check if an image exists
ImageExists(string, string) bool

View File

@ -288,6 +288,16 @@ func (r *Docker) BuildImage(src string, file string, tag string, push bool, env
return nil
}
// PushImage pushes an image
func (r *Docker) PushImage(name string) error {
klog.Infof("Pushing image: %s", name)
c := exec.Command("docker", "push", name)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "push image docker.")
}
return nil
}
// CGroupDriver returns cgroup driver ("cgroupfs" or "systemd")
func (r *Docker) CGroupDriver() (string, error) {
// Note: the server daemon has to be running, for this call to return successfully
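
Taken together, these hunks let callers push through the `Manager` interface without caring which runtime backs the node. A minimal, self-contained sketch of that polymorphism, with a hypothetical `pusher` interface and only the Docker variant spelled out (the `ctr` and `podman` commands in the diff follow the same shape):

```go
package main

import (
	"fmt"
	"os/exec"
)

// pusher is a hypothetical one-method slice of the Manager interface.
type pusher interface {
	PushImage(name string) error
}

// dockerRuntime mirrors the Docker variant above; the containerd and CRI-O
// variants differ only in the command ("sudo ctr -n=k8s.io images push",
// "sudo podman push").
type dockerRuntime struct{}

func (dockerRuntime) PushImage(name string) error {
	out, err := exec.Command("docker", "push", name).CombinedOutput()
	if err != nil {
		return fmt.Errorf("docker push %s: %v: %s", name, err, out)
	}
	return nil
}

func main() {
	var r pusher = dockerRuntime{}
	if err := r.PushImage("example.com/repo/busybox:latest"); err != nil {
		fmt.Println(err)
	}
}
```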

View File

@ -40,7 +40,7 @@ const fileScheme = "file"
// DefaultISOURLs returns a list of ISO URL's to consult by default, in priority order
func DefaultISOURLs() []string {
v := version.GetISOVersion()
isoBucket := "minikube-builds/iso/12032"
isoBucket := "minikube-builds/iso/12268"
return []string{
fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s.iso", isoBucket, v),
fmt.Sprintf("https://github.com/kubernetes/minikube/releases/download/%s/minikube-%s.iso", v, v),

View File

@ -43,7 +43,7 @@ const (
// PreloadVersion is the current version of the preloaded tarball
//
// NOTE: You may need to bump this version up when upgrading auxiliary docker images
PreloadVersion = "v11"
PreloadVersion = "v12"
// PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist
PreloadBucket = "minikube-preloaded-volume-tarballs"
)

View File

@ -771,3 +771,84 @@ func TagImage(profile *config.Profile, source string, target string) error {
klog.Infof("failed tagging in: %s", strings.Join(failed, " "))
return nil
}
// pushImages pushes images from the container run time
func pushImages(cruntime cruntime.Manager, images []string) error {
klog.Infof("PushImages start: %s", images)
start := time.Now()
defer func() {
klog.Infof("PushImages completed in %s", time.Since(start))
}()
var g errgroup.Group
for _, image := range images {
image := image
g.Go(func() error {
return cruntime.PushImage(image)
})
}
if err := g.Wait(); err != nil {
return errors.Wrap(err, "error pushing images")
}
klog.Infoln("Successfully pushed images")
return nil
}
// PushImages push images on all nodes in profile
func PushImages(images []string, profile *config.Profile) error {
api, err := NewAPIClient()
if err != nil {
return errors.Wrap(err, "error creating api client")
}
defer api.Close()
succeeded := []string{}
failed := []string{}
pName := profile.Name
c, err := config.Load(pName)
if err != nil {
klog.Errorf("Failed to load profile %q: %v", pName, err)
return errors.Wrapf(err, "error loading config for profile :%v", pName)
}
for _, n := range c.Nodes {
m := config.MachineName(*c, n)
status, err := Status(api, m)
if err != nil {
klog.Warningf("error getting status for %s: %v", m, err)
continue
}
if status == state.Running.String() {
h, err := api.Load(m)
if err != nil {
klog.Warningf("Failed to load machine %q: %v", m, err)
continue
}
runner, err := CommandRunner(h)
if err != nil {
return err
}
cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "error creating container runtime")
}
err = pushImages(cruntime, images)
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to push image for profile %s %v", pName, err.Error())
continue
}
succeeded = append(succeeded, m)
}
}
klog.Infof("succeeded pushing in: %s", strings.Join(succeeded, " "))
klog.Infof("failed pushing in: %s", strings.Join(failed, " "))
return nil
}
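
The per-image fan-out in `pushImages` uses `golang.org/x/sync/errgroup`: one goroutine per image, with `Wait` returning the first error. A standalone sketch of the same pattern, where `pushOne` stands in for `cruntime.PushImage` (requires the `golang.org/x/sync` module):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// pushAll fans out one goroutine per image, as pushImages does above;
// pushOne stands in for cruntime.PushImage. Wait returns the first error.
func pushAll(images []string, pushOne func(string) error) error {
	var g errgroup.Group
	for _, image := range images {
		image := image // capture the loop variable (pre-Go 1.22 idiom)
		g.Go(func() error {
			return pushOne(image)
		})
	}
	return g.Wait()
}

func main() {
	err := pushAll([]string{"busybox:latest", "alpine:3"}, func(img string) error {
		fmt.Println("pushing", img)
		return nil
	})
	fmt.Println("err:", err)
}
```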

View File

@ -118,7 +118,7 @@ func Styled(st style.Enum, format string, a ...V) {
func boxedCommon(printFunc func(format string, a ...interface{}), cfg box.Config, title string, format string, a ...V) {
box := box.New(cfg)
if !useColor {
box.Config.Color = ""
box.Config.Color = nil
}
str := Sprintf(style.None, format, a...)
printFunc(box.String(title, strings.TrimSpace(str)))

View File

@ -24,6 +24,7 @@ import (
"k8s.io/minikube/pkg/minikube/out/register"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/tests"
)
type buffFd struct {
@ -86,7 +87,7 @@ func TestDisplayProblem(t *testing.T) {
}
}
func TestDisplayJSON(t *testing.T) {
func TestDisplayProblemJSON(t *testing.T) {
defer SetJSON(false)
SetJSON(true)
@ -96,7 +97,6 @@ func TestDisplayJSON(t *testing.T) {
}{
{
k: &reason.Kind{
ID: "BUG",
ExitCode: 4,
Advice: "fix me!",
@ -117,12 +117,10 @@ func TestDisplayJSON(t *testing.T) {
return "random-id"
}
JSON = true
Error(*tc.k, "my error")
actual := buf.String()
if actual != tc.expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", tc.expected, actual)
}
actual := buf.Bytes()
tests.CompareJSON(t, actual, []byte(tc.expected))
})
}
}

View File

@ -21,6 +21,8 @@ import (
"fmt"
"os"
"testing"
"k8s.io/minikube/pkg/minikube/tests"
)
func TestPrintStep(t *testing.T) {
@ -39,11 +41,9 @@ func TestPrintStep(t *testing.T) {
}
PrintStep("message")
actual := buf.String()
actual := buf.Bytes()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
tests.CompareJSON(t, actual, []byte(expected))
}
func TestPrintInfo(t *testing.T) {
@ -59,11 +59,9 @@ func TestPrintInfo(t *testing.T) {
}
PrintInfo("info")
actual := buf.String()
actual := buf.Bytes()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
tests.CompareJSON(t, actual, []byte(expected))
}
func TestError(t *testing.T) {
@ -79,11 +77,9 @@ func TestError(t *testing.T) {
}
PrintError("error")
actual := buf.String()
actual := buf.Bytes()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
tests.CompareJSON(t, actual, []byte(expected))
}
func TestErrorExitCode(t *testing.T) {
@ -99,10 +95,9 @@ func TestErrorExitCode(t *testing.T) {
}
PrintErrorExitCode("error", 5, map[string]string{"a": "b"}, map[string]string{"c": "d"})
actual := buf.String()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
actual := buf.Bytes()
tests.CompareJSON(t, actual, []byte(expected))
}
func TestWarning(t *testing.T) {
@ -118,9 +113,7 @@ func TestWarning(t *testing.T) {
}
PrintWarning("warning")
actual := buf.String()
actual := buf.Bytes()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
tests.CompareJSON(t, actual, []byte(expected))
}

View File

@ -21,6 +21,8 @@ import (
"fmt"
"os"
"testing"
"k8s.io/minikube/pkg/minikube/tests"
)
func TestSetCurrentStep(t *testing.T) {
@ -42,9 +44,7 @@ func TestSetCurrentStep(t *testing.T) {
}
PrintStep("message")
actual := buf.String()
actual := buf.Bytes()
if actual != expected {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
tests.CompareJSON(t, actual, []byte(expected))
}

View File

@ -175,6 +175,37 @@ var hostIssues = []match{
},
Regexp: re(`Container.*is not running.*chown docker:docker`),
},
{
Kind: Kind{
ID: "HOST_CGROUP_NOT_SUPPORTED",
ExitCode: ExHostUnsupported,
Advice: `CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:
minikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=""
`,
Issues: []int{12232},
},
Regexp: re(`Failed to start ContainerManager" err="Unit kubepods.slice already exists.`),
GOOS: []string{"linux"},
},
{
Kind: Kind{
ID: "HOST_ROOT_CGROUP",
ExitCode: ExHostUnsupported,
Advice: `CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:
minikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=""
`,
Issues: []int{12232},
},
Regexp: re(`Failed to start ContainerManager" err="failed to initialize top level QOS containers: root container [kubepods] doesn't exist`),
GOOS: []string{"linux"},
},
{
Kind: Kind{
ID: "HOST_PIDS_CGROUP",

View File

@ -321,6 +321,8 @@ var (
GuestImageBuild = Kind{ID: "GUEST_IMAGE_BUILD", ExitCode: ExGuestError}
// minikube failed to push or save an image
GuestImageSave = Kind{ID: "GUEST_IMAGE_SAVE", ExitCode: ExGuestError}
// minikube failed to push an image
GuestImagePush = Kind{ID: "GUEST_IMAGE_PUSH", ExitCode: ExGuestError}
// minikube failed to tag an image
GuestImageTag = Kind{ID: "GUEST_IMAGE_TAG", ExitCode: ExGuestError}
// minikube failed to load host

View File

@ -0,0 +1,53 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tests
import (
"encoding/json"
"reflect"
"testing"
)
// TestEvent simulates a CloudEvent for our JSON output
type TestEvent struct {
Data map[string]string `json:"data"`
Datacontenttype string `json:"datacontenttype"`
ID string `json:"id"`
Source string `json:"source"`
Specversion string `json:"specversion"`
Eventtype string `json:"type"`
}
// CompareJSON takes two byte slices, unmarshals them to TestEvent
// and compares them, failing the test if they don't match
func CompareJSON(t *testing.T, actual, expected []byte) {
var actualJSON, expectedJSON TestEvent
err := json.Unmarshal(actual, &actualJSON)
if err != nil {
t.Fatalf("error unmarshalling json: %v", err)
}
err = json.Unmarshal(expected, &expectedJSON)
if err != nil {
t.Fatalf("error unmarshalling json: %v", err)
}
if !reflect.DeepEqual(actualJSON, expectedJSON) {
t.Fatalf("expected didn't match actual:\nExpected:\n%v\n\nActual:\n%v", expected, actual)
}
}
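
The point of `CompareJSON` over raw string equality is semantic comparison: both sides are unmarshalled into the same struct and compared with `reflect.DeepEqual`, so key order and whitespace no longer cause false failures. A standalone sketch of the idea with a trimmed-down hypothetical `event` struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// event is a trimmed-down stand-in for TestEvent.
type event struct {
	ID   string            `json:"id"`
	Data map[string]string `json:"data"`
}

func main() {
	// Same payload, different key order and spacing: raw string equality
	// fails, but unmarshal-then-DeepEqual (as CompareJSON does) succeeds.
	a := []byte(`{"id":"random-id","data":{"msg":"hi"}}`)
	b := []byte(`{ "data": {"msg": "hi"}, "id": "random-id" }`)

	var ea, eb event
	if err := json.Unmarshal(a, &ea); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(b, &eb); err != nil {
		panic(err)
	}
	fmt.Println("string equal:", string(a) == string(b))    // false
	fmt.Println("semantic equal:", reflect.DeepEqual(ea, eb)) // true
}
```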

View File

@ -36,7 +36,7 @@ type sshConn struct {
activeConn bool
}
func createSSHConn(name, sshPort, sshKey string, svc *v1.Service) *sshConn {
func createSSHConn(name, sshPort, sshKey string, resourcePorts []int32, resourceIP string, resourceName string) *sshConn {
// extract sshArgs
sshArgs := []string{
// TODO: document the options here
@ -50,17 +50,17 @@ func createSSHConn(name, sshPort, sshKey string, svc *v1.Service) *sshConn {
askForSudo := false
var privilegedPorts []int32
for _, port := range svc.Spec.Ports {
for _, port := range resourcePorts {
arg := fmt.Sprintf(
"-L %d:%s:%d",
port.Port,
svc.Spec.ClusterIP,
port.Port,
port,
resourceIP,
port,
)
// check if any port is privileged
if port.Port < 1024 {
privilegedPorts = append(privilegedPorts, port.Port)
if port < 1024 {
privilegedPorts = append(privilegedPorts, port)
askForSudo = true
}
@ -71,8 +71,8 @@ func createSSHConn(name, sshPort, sshKey string, svc *v1.Service) *sshConn {
if askForSudo && runtime.GOOS != "windows" {
out.Styled(
style.Warning,
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}",
out.V{"service": svc.Name, "ports": fmt.Sprintf("%v", privilegedPorts)},
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}",
out.V{"resource": resourceName, "ports": fmt.Sprintf("%v", privilegedPorts)},
)
out.Styled(style.Permissions, "sudo permission will be asked for it.")
@ -89,7 +89,7 @@ func createSSHConn(name, sshPort, sshKey string, svc *v1.Service) *sshConn {
return &sshConn{
name: name,
service: svc.Name,
service: resourceName,
cmd: cmd,
activeConn: false,
}

View File

@ -23,8 +23,10 @@ import (
"time"
v1 "k8s.io/api/core/v1"
v1_networking "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typed_core "k8s.io/client-go/kubernetes/typed/core/v1"
typed_networking "k8s.io/client-go/kubernetes/typed/networking/v1"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/tunnel"
@ -36,19 +38,21 @@ type SSHTunnel struct {
sshPort string
sshKey string
v1Core typed_core.CoreV1Interface
v1Networking typed_networking.NetworkingV1Interface
LoadBalancerEmulator tunnel.LoadBalancerEmulator
conns map[string]*sshConn
connsToStop map[string]*sshConn
}
// NewSSHTunnel ...
func NewSSHTunnel(ctx context.Context, sshPort, sshKey string, v1Core typed_core.CoreV1Interface) *SSHTunnel {
func NewSSHTunnel(ctx context.Context, sshPort, sshKey string, v1Core typed_core.CoreV1Interface, v1Networking typed_networking.NetworkingV1Interface) *SSHTunnel {
return &SSHTunnel{
ctx: ctx,
sshPort: sshPort,
sshKey: sshKey,
v1Core: v1Core,
LoadBalancerEmulator: tunnel.NewLoadBalancerEmulator(v1Core),
v1Networking: v1Networking,
conns: make(map[string]*sshConn),
connsToStop: make(map[string]*sshConn),
}
@ -73,6 +77,11 @@ func (t *SSHTunnel) Start() error {
klog.Errorf("error listing services: %v", err)
}
ingresses, err := t.v1Networking.Ingresses("").List(context.Background(), metav1.ListOptions{})
if err != nil {
klog.Errorf("error listing ingresses: %v", err)
}
t.markConnectionsToBeStopped()
for _, svc := range services.Items {
@ -81,6 +90,10 @@ func (t *SSHTunnel) Start() error {
}
}
for _, ingress := range ingresses.Items {
t.startConnectionIngress(ingress)
}
t.stopMarkedConnections()
// TODO: which time to use?
@ -104,8 +117,14 @@ func (t *SSHTunnel) startConnection(svc v1.Service) {
return
}
resourcePorts := []int32{}
for _, port := range svc.Spec.Ports {
resourcePorts = append(resourcePorts, port.Port)
}
// create new ssh conn
newSSHConn := createSSHConn(uniqName, t.sshPort, t.sshKey, &svc)
newSSHConn := createSSHConn(uniqName, t.sshPort, t.sshKey, resourcePorts, svc.Spec.ClusterIP, svc.Name)
t.conns[newSSHConn.name] = newSSHConn
go func() {
@ -121,6 +140,31 @@ func (t *SSHTunnel) startConnection(svc v1.Service) {
}
}
func (t *SSHTunnel) startConnectionIngress(ingress v1_networking.Ingress) {
uniqName := sshConnUniqNameIngress(ingress)
existingSSHConn, ok := t.conns[uniqName]
if ok {
// if the ingress still exists we remove the conn from the stopping list
delete(t.connsToStop, existingSSHConn.name)
return
}
resourcePorts := []int32{80, 443}
resourceIP := "127.0.0.1"
// create new ssh conn
newSSHConn := createSSHConn(uniqName, t.sshPort, t.sshKey, resourcePorts, resourceIP, ingress.Name)
t.conns[newSSHConn.name] = newSSHConn
go func() {
err := newSSHConn.startAndWait()
if err != nil {
klog.Errorf("error starting ssh tunnel: %v", err)
}
}()
}
func (t *SSHTunnel) stopActiveConnections() {
for _, conn := range t.conns {
err := conn.stop()
@ -157,3 +201,13 @@ func sshConnUniqName(service v1.Service) string {
return strings.Join(n, "")
}
func sshConnUniqNameIngress(ingress v1_networking.Ingress) string {
n := []string{ingress.Name}
for _, rule := range ingress.Spec.Rules {
n = append(n, rule.Host)
}
return strings.Join(n, "")
}
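
With `createSSHConn` now taking plain ports and an IP, ingresses reuse the exact service code path: the only differences are the fixed port list `{80, 443}` and the `127.0.0.1` target. A standalone sketch of how the `-L` forwarding flags come together (the `-N` base flag here is illustrative; the real argument list is longer):

```go
package main

import "fmt"

// forwardArgs builds the -L flags the way createSSHConn now does for both
// resource kinds; the "-N" base flag is illustrative, the real list is longer.
func forwardArgs(ports []int32, ip string) []string {
	args := []string{"-N"}
	for _, port := range ports {
		args = append(args, fmt.Sprintf("-L %d:%s:%d", port, ip, port))
	}
	return args
}

func main() {
	// A service passes its ClusterIP and spec ports; an ingress is fixed
	// to 80/443 on 127.0.0.1, as in startConnectionIngress above.
	fmt.Println(forwardArgs([]int32{80, 443}, "127.0.0.1"))
}
```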

View File

@ -20,11 +20,12 @@ minikube docker-env [flags]
### Options
```
--no-proxy Add machine IP to NO_PROXY environment variable
--shell string Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect
--ssh-add Add SSH identity key to SSH authentication agent
--ssh-host Use SSH connection instead of HTTPS (port 2376)
-u, --unset Unset variables instead of setting them
--no-proxy Add machine IP to NO_PROXY environment variable
-o, --output string One of 'text', 'yaml' or 'json'.
--shell string Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect
--ssh-add Add SSH identity key to SSH authentication agent
--ssh-host Use SSH connection instead of HTTPS (port 2376)
-u, --unset Unset variables instead of setting them
```
### Options inherited from parent commands

View File

@ -258,6 +258,48 @@ $ minikube image pull busybox
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image push
Push images
### Synopsis
Push images
```shell
minikube image push [flags]
```
### Examples
```
$ minikube image push busybox
```
### Options inherited from parent commands
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
-h, --help
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_file string If non-empty, use this log file
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--logtostderr log to standard error instead of files
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
--user string Specifies the user executing the operation. Useful for auditing operations executed by 3rd party tools. Defaults to the operating system username.
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image rm
Remove one or more images

View File

@ -65,7 +65,7 @@ minikube start [flags]
--insecure-registry strings Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.
--install-addons If set, install addons. Defaults to true. (default true)
--interactive Allow user prompts for more information (default true)
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube-builds/iso/12032/minikube-v1.22.0-1628622362-12032.iso,https://github.com/kubernetes/minikube/releases/download/v1.22.0-1628622362-12032/minikube-v1.22.0-1628622362-12032.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.22.0-1628622362-12032.iso])
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube-builds/iso/12268/minikube-v1.22.0-1628974786-12268.iso,https://github.com/kubernetes/minikube/releases/download/v1.22.0-1628974786-12268/minikube-v1.22.0-1628974786-12268.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.22.0-1628974786-12268.iso])
--keep-context This will keep the existing kubectl context and will create a minikube context.
--kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.21.3, 'latest' for v1.22.0-rc.0). Defaults to 'stable'.
--kvm-gpu Enable experimental NVIDIA GPU support in minikube

View File

@ -387,6 +387,9 @@ minikube failed to build an image
"GUEST_IMAGE_SAVE" (Exit code ExGuestError)
minikube failed to push or save an image
"GUEST_IMAGE_PUSH" (Exit code ExGuestError)
minikube failed to push an image
"GUEST_IMAGE_TAG" (Exit code ExGuestError)
minikube failed to tag an image

View File

@ -95,7 +95,7 @@ Simply run the following command to be enrolled into beta notifications:
minikube config set WantBetaUpdateNotification true
```
## Can I get rid of the emoji in minikube's outpuut?
## Can I get rid of the emoji in minikube's output?
Yes! If you prefer not having emoji in your minikube output 😔 , just set the `MINIKUBE_IN_STYLE` environment variable to `0` or `false`:

View File

@ -47,7 +47,7 @@ spec:
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
@ -76,7 +76,7 @@ rules:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:

View File

@ -45,7 +45,7 @@ then
readonly BDIR="${ROOT_DIR}/hack/boilerplate"
pushd . >/dev/null
cd ${BDIR}
missing="$(go run boilerplate.go -rootdir ${ROOT_DIR} -boilerplate-dir ${BDIR} | egrep -v '/assets.go|/translations.go|/site/themes/|/site/node_modules|\./out|/hugo/' || true)"
missing="$(go run boilerplate.go -rootdir ${ROOT_DIR} -boilerplate-dir ${BDIR} | egrep -v '/assets.go|/translations.go|/site/themes/|/site/node_modules|\./out|/hugo/|hack/benchmark/time-to-k8s/time-to-k8s-repo' || true)"
if [[ -n "${missing}" ]]; then
echo "boilerplate missing: $missing"
echo "consider running: ${BDIR}/fix.sh"

View File

@ -29,7 +29,6 @@ import (
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"time"
@ -46,57 +45,67 @@ func TestAddons(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
defer Cleanup(t, profile, cancel)
// We don't need a dummy file if we're on GCE
if !detect.IsOnGCE() || detect.IsCloudShell() {
// Set an env var to point to our dummy credentials file
err := os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", filepath.Join(*testdataDir, "gcp-creds.json"))
defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")
if err != nil {
t.Fatalf("Failed setting GOOGLE_APPLICATION_CREDENTIALS env var: %v", err)
setupSucceeded := t.Run("Setup", func(t *testing.T) {
// We don't need a dummy file if we're on GCE
if !detect.IsOnGCE() || detect.IsCloudShell() {
// Set an env var to point to our dummy credentials file
err := os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", filepath.Join(*testdataDir, "gcp-creds.json"))
t.Cleanup(func() {
os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")
})
if err != nil {
t.Fatalf("Failed setting GOOGLE_APPLICATION_CREDENTIALS env var: %v", err)
}
err = os.Setenv("GOOGLE_CLOUD_PROJECT", "this_is_fake")
t.Cleanup(func() {
os.Unsetenv("GOOGLE_CLOUD_PROJECT")
})
if err != nil {
t.Fatalf("Failed setting GOOGLE_CLOUD_PROJECT env var: %v", err)
}
}
err = os.Setenv("GOOGLE_CLOUD_PROJECT", "this_is_fake")
defer os.Unsetenv("GOOGLE_CLOUD_PROJECT")
if err != nil {
t.Fatalf("Failed setting GOOGLE_CLOUD_PROJECT env var: %v", err)
args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
if !NoneDriver() { // none driver does not support ingress
args = append(args, "--addons=ingress")
}
if !arm64Platform() {
args = append(args, "--addons=helm-tiller")
}
if !detect.IsOnGCE() {
args = append(args, "--addons=gcp-auth")
}
}
args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
if !NoneDriver() && !(runtime.GOOS == "darwin" && KicDriver()) { // none driver and macos docker driver does not support ingress
args = append(args, "--addons=ingress")
}
if !arm64Platform() {
args = append(args, "--addons=helm-tiller")
}
if !detect.IsOnGCE() {
args = append(args, "--addons=gcp-auth")
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// If we're running the integration tests on GCE, which is frequently the case, first check to make sure we exit out properly,
// then use force to actually test using creds.
if detect.IsOnGCE() {
args = []string{"-p", profile, "addons", "enable", "gcp-auth"}
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err == nil {
t.Errorf("Expected error but didn't get one. command %v, output %v", rr.Command(), rr.Output())
} else {
if !strings.Contains(rr.Output(), "It seems that you are running in GCE") {
t.Errorf("Unexpected error message: %v", rr.Output())
if err != nil {
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// If we're running the integration tests on GCE, which is frequently the case, first check to make sure we exit out properly,
// then use force to actually test using creds.
if detect.IsOnGCE() {
args = []string{"-p", profile, "addons", "enable", "gcp-auth"}
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err == nil {
t.Errorf("Expected error but didn't get one. command %v, output %v", rr.Command(), rr.Output())
} else {
// ok, use force here since we are in GCE
// do not use --force unless absolutely necessary
args = append(args, "--force")
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Command(), err)
if !strings.Contains(rr.Output(), "It seems that you are running in GCE") {
t.Errorf("Unexpected error message: %v", rr.Output())
} else {
// ok, use force here since we are in GCE
// do not use --force unless absolutely necessary
args = append(args, "--force")
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Command(), err)
}
}
}
}
})
if !setupSucceeded {
t.Fatalf("Failed setup for addon tests")
}
// Parallelized tests
@ -125,25 +134,27 @@ func TestAddons(t *testing.T) {
}
})
// Assert that disable/enable works offline
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
if err != nil {
t.Errorf("failed to stop minikube. args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err)
}
t.Run("StoppedEnableDisable", func(t *testing.T) {
// Assert that disable/enable works offline
rr, err := Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
if err != nil {
t.Errorf("failed to stop minikube. args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err)
}
})
}
// validateIngressAddon tests the ingress addon by deploying a default nginx pod
func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
if NoneDriver() || (runtime.GOOS == "darwin" && KicDriver()) {
if NoneDriver() {
t.Skipf("skipping: ingress not supported ")
}
@ -159,10 +170,10 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("failed waititing for ingress-nginx-controller : %v", err)
}
// create networking.k8s.io/v1beta1 ingress
createv1betaIngress := func() error {
// create networking.k8s.io/v1 ingress
createv1Ingress := func() error {
// apply networking.k8s.io/v1 ingress
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ingv1beta.yaml")))
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ingv1.yaml")))
if err != nil {
return err
}
@ -172,8 +183,8 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
return nil
}
// create networking.k8s.io/v1beta1 ingress
if err := retry.Expo(createv1betaIngress, 1*time.Second, Seconds(90)); err != nil {
// create networking.k8s.io/v1 ingress
if err := retry.Expo(createv1Ingress, 1*time.Second, Seconds(90)); err != nil {
t.Errorf("failed to create ingress: %v", err)
}
@ -224,19 +235,6 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
t.Errorf("failed to get expected response from %s within minikube: %v", addr, err)
}
// create networking.k8s.io/v1 ingress
createv1Ingress := func() error {
// apply networking.k8s.io/v1beta1 ingress
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ingv1.yaml")))
if err != nil {
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s (may be temporary)", rr.Command(), rr.Stderr)
}
return nil
}
// create networking.k8s.io/v1 ingress
if err := retry.Expo(createv1Ingress, 1*time.Second, Seconds(90)); err != nil {
t.Errorf("failed to create ingress: %v", err)

View File

@ -136,23 +136,27 @@ func validateServiceStable(ctx context.Context, t *testing.T, profile string) {
t.Skip("The test WaitService is broken on github actions in macos https://github.com/kubernetes/minikube/issues/8434")
}
checkRoutePassword(t)
setupSucceeded := t.Run("Setup", func(t *testing.T) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("failed to get Kubernetes client for %q: %v", profile, err)
}
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("failed to get Kubernetes client for %q: %v", profile, err)
}
// Start the "nginx" pod.
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
}
// Start the "nginx" pod.
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
}
if err := kapi.WaitForService(client, "default", "nginx-svc", true, 1*time.Second, Minutes(2)); err != nil {
t.Fatal(errors.Wrap(err, "Error waiting for nginx service to be up"))
if err := kapi.WaitForService(client, "default", "nginx-svc", true, 1*time.Second, Minutes(2)); err != nil {
t.Fatal(errors.Wrap(err, "Error waiting for nginx service to be up"))
}
})
if !setupSucceeded {
t.Fatal("Failed setup")
}
t.Run("IngressIP", func(t *testing.T) {
@ -160,7 +164,7 @@ func validateServiceStable(ctx context.Context, t *testing.T, profile string) {
t.Skip("The test WaitService/IngressIP is broken on hyperv https://github.com/kubernetes/minikube/issues/8381")
}
// Wait until the nginx-svc has a loadbalancer ingress IP
err = wait.PollImmediate(5*time.Second, Minutes(3), func() (bool, error) {
err := wait.PollImmediate(5*time.Second, Minutes(3), func() (bool, error) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc", "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}"))
if err != nil {
return false, err

View File

@ -35,11 +35,13 @@ func TestGuestEnvironment(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), Minutes(15))
defer CleanupWithLogs(t, profile, cancel)
args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=2048", "--wait=false"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err)
}
t.Run("Setup", func(t *testing.T) {
args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=2048", "--wait=false"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err)
}
})
// Run as a group so that our defer doesn't happen as tests are runnings
t.Run("Binaries", func(t *testing.T) {

View File

@ -55,18 +55,21 @@ func TestJSONOutput(t *testing.T) {
for _, test := range tests {
t.Run(test.command, func(t *testing.T) {
args := []string{test.command, "-p", profile, "--output=json", "--user=testUser"}
args = append(args, test.args...)
var ces []*cloudEvent
t.Run("Command", func(t *testing.T) {
args := []string{test.command, "-p", profile, "--output=json", "--user=testUser"}
args = append(args, test.args...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
}
ces, err := cloudEvents(t, rr)
if err != nil {
t.Fatalf("converting to cloud events: %v\n", err)
}
ces, err = cloudEvents(t, rr)
if err != nil {
t.Fatalf("converting to cloud events: %v\n", err)
}
})
t.Run("Audit", func(t *testing.T) {
got, err := auditContains("testUser")

View File

@ -33,6 +33,7 @@ import (
"github.com/google/go-cmp/cmp"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/detect"
)
// TestStartStop tests starting, stopping and restarting a minikube clusters with various Kubernetes versions and configurations
@ -76,6 +77,16 @@ func TestStartStop(t *testing.T) {
}},
}
if detect.IsCloudShell() {
tests = []struct {
name string
version string
args []string
}{
{"cloud-shell", constants.DefaultKubernetesVersion, []string{}},
}
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {

View File

@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
labels:
integration-test: ingress
spec:
rules:
- host: nginx.example.com
http:
paths:
- path: "/"
backend:
serviceName: nginx
servicePort: 80

View File

@ -161,51 +161,53 @@ func TestStoppedBinaryUpgrade(t *testing.T) {
}
defer os.Remove(tf.Name())
args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
rr := &RunResult{}
r := func() error {
c := exec.CommandContext(ctx, tf.Name(), args...)
var legacyEnv []string
// replace the global KUBECONFIG with a fresh kubeconfig
// because minikube<1.17.0 cannot read the new kubeconfigs that have an extra "Extensions" block
// see: https://github.com/kubernetes/minikube/issues/10210
for _, e := range os.Environ() {
if !strings.Contains(e, "KUBECONFIG") { // keep all global envs except KUBECONFIG, which is used by newer versions of minikube
legacyEnv = append(legacyEnv, e)
t.Run("Upgrade", func(t *testing.T) {
args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
rr := &RunResult{}
r := func() error {
c := exec.CommandContext(ctx, tf.Name(), args...)
var legacyEnv []string
// replace the global KUBECONFIG with a fresh kubeconfig
// because minikube<1.17.0 cannot read the new kubeconfigs that have an extra "Extensions" block
// see: https://github.com/kubernetes/minikube/issues/10210
for _, e := range os.Environ() {
if !strings.Contains(e, "KUBECONFIG") { // keep all global envs except KUBECONFIG, which is used by newer versions of minikube
legacyEnv = append(legacyEnv, e)
}
}
// using a fresh kubeconfig for this test
legacyKubeConfig, err := ioutil.TempFile("", "legacy_kubeconfig")
if err != nil {
t.Fatalf("failed to create temp file for legacy kubeconfig %v", err)
}
defer os.Remove(legacyKubeConfig.Name()) // clean up
legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))
c.Env = legacyEnv
rr, err = Run(t, c)
return err
}
// using a fresh kubeconfig for this test
legacyKubeConfig, err := ioutil.TempFile("", "legacy_kubeconfig")
// Retry up to two times, to allow flakiness for the legacy release
if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
t.Fatalf("legacy %s start failed: %v", desiredLegacyVersion, err)
}
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "stop"))
if err != nil {
t.Fatalf("failed to create temp file for legacy kubeconfig %v", err)
t.Errorf("failed to stop cluster: %s: %v", rr.Command(), err)
}
defer os.Remove(legacyKubeConfig.Name()) // clean up
legacyEnv = append(legacyEnv, fmt.Sprintf("KUBECONFIG=%s", legacyKubeConfig.Name()))
c.Env = legacyEnv
rr, err = Run(t, c)
return err
}
// Retry up to two times, to allow flakiness for the legacy release
if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
t.Fatalf("legacy %s start failed: %v", desiredLegacyVersion, err)
}
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "stop"))
if err != nil {
t.Errorf("failed to stop cluster: %s: %v", rr.Command(), err)
}
args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err)
}
args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err)
}
})
t.Run("MinikubeLogs", func(t *testing.T) {
args := []string{"logs", "-p", profile}
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
_, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("`minikube logs` after upgrade to HEAD from %s failed: %v", desiredLegacyVersion, err)
}

View File

@ -17,6 +17,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -68,6 +69,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "",
"Build a container image, using the container runtime.": "",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -151,7 +154,6 @@
"Downloading Kubernetes {{.version}} preload ...": "",
"Downloading VM boot image ...": "",
"Downloading driver {{.driver}}:": "",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "",
"ERROR creating `registry-creds-dpr` secret": "",
@ -248,6 +250,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
@ -259,6 +263,7 @@
"Failed to start container runtime": "",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "",
"Failed to tag images": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed unmount: {{.error}}": "",
@ -400,6 +405,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -431,6 +437,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "",
@ -456,8 +463,10 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Geben Sie die VM-UUID an, um die MAC-Adresse wiederherzustellen (nur Hyperkit-Treiber)",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
"Rebuild libvirt with virt-network support": "",
@ -576,6 +585,7 @@
"Successfully stopped node {{.name}}": "",
"Suggestion: {{.advice}}": "",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "",
"Target {{.path}} can not be empty": "",
@ -659,7 +669,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -846,11 +856,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -18,6 +18,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -69,6 +70,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "",
"Build a container image, using the container runtime.": "",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "Plug-in CNI para usar. Opciones validas: auto, bridge, calico, cilium, flannel, kindnet, o ruta a un manifiesto CNI (Por defecto: auto)",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -253,6 +256,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
@ -264,6 +269,7 @@
"Failed to start container runtime": "",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "",
"Failed to tag images": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed unmount: {{.error}}": "",
@ -405,6 +411,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -436,6 +443,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "",
@ -461,8 +469,10 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Permite especificar un UUID de VM para restaurar la dirección MAC (solo con el controlador de hyperkit)",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
"Rebuild libvirt with virt-network support": "",
@ -581,6 +591,7 @@
"Successfully stopped node {{.name}}": "",
"Suggestion: {{.advice}}": "",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "",
"Target {{.path}} can not be empty": "",
@ -664,7 +675,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -851,11 +862,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -18,6 +18,7 @@
"- {{.logPath}}": "- {{.logPath}}",
"--kvm-numa-count range is 1-8": "la tranche de --kvm-numa-count est 1 à 8",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "le drapeau --network est valide uniquement avec les pilotes docker/podman et KVM, il va être ignoré",
"127.0.0.1": "127.0.0.1",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "\u003ctarget file absolute path\u003e doit être un chemin absolu. Les chemins relatifs ne sont pas autorisés (exemple: \"/home/docker/copied.txt\")",
"==\u003e Audit \u003c==": "==\u003e Audit \u003c==",
"==\u003e Last Start \u003c==": "==\u003e Dernier démarrage \u003c==",
@ -70,6 +71,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "Le pont CNI est incompatible avec les clusters multi-nœuds, utilisez un autre CNI",
"Build a container image in minikube": "Construire une image de conteneur dans minikube",
"Build a container image, using the container runtime.": "Construire une image de conteneur à l'aide de l'environnement d'exécution du conteneur.",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "L'allocation CGroup n'est pas disponible dans votre environnement, vous exécutez peut-être minikube dans un conteneur imbriqué. Essayez d'exécuter :\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "L'allocation CGroup n'est pas disponible dans votre environnement, vous exécutez peut-être minikube dans un conteneur imbriqué. Essayez d'exécuter :\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "Plug-in CNI à utiliser. Options valides : auto, bridge, calico, cilium, flannel, kindnet ou chemin vers un manifeste CNI (par défaut : auto)",
"Cache image from docker daemon": "Cacher l'image du démon docker",
"Cache image from remote registry": "Cacher l'image du registre distant",
@ -251,6 +254,8 @@
"Failed to load image": "Échec du chargement de l'image",
"Failed to persist images": "Échec de la persistance des images",
"Failed to pull image": "Échec de l'extraction de l'image",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "Échec du rechargement des images mises en cache",
"Failed to remove image": "Échec de la suppression de l'image",
"Failed to save config {{.profile}}": "Échec de l'enregistrement de la configuration {{.profile}}",
@ -262,6 +267,7 @@
"Failed to start container runtime": "Échec du démarrage de l'exécution du conteneur",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "Échec du démarrage de {{.driver}} {{.driver_type}}. L'exécution de \"{{.cmd}}\" peut résoudre le problème : {{.error}}",
"Failed to stop node {{.name}}": "Échec de l'arrêt du nœud {{.name}}",
"Failed to tag images": "",
"Failed to update cluster": "Échec de la mise à jour du cluster",
"Failed to update config": "Échec de la mise à jour de la configuration",
"Failed to verify '{{.driver_name}} info' will try again ...": "Échec de la vérification des informations sur '{{.driver_name}}' va réessayer ...",
@ -405,6 +411,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "Nombre de disques supplémentaires créés et attachés à la machine virtuelle minikube (actuellement implémenté uniquement pour le pilote hyperkit)",
"Number of lines back to go within the log": "Nombre de lignes à remonter dans le journal",
"OS release is {{.pretty_name}}": "La version du système d'exploitation est {{.pretty_name}}",
"One of 'text', 'yaml' or 'json'.": "Un parmi 'text', 'yaml' ou 'json'.",
"One of 'yaml' or 'json'.": "Un parmi 'yaml' ou 'json'.",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "Seuls les caractères alphanumériques et les tirets '-' sont autorisés. Minimum 1 caractère, commençant par alphanumérique.",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "Seuls les caractères alphanumériques et les tirets '-' sont autorisés. Minimum 2 caractères, commençant par alphanumérique.",
@ -436,6 +443,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "Veuillez vous assurer que le service que vous recherchez est déployé ou se trouve dans le bon espace de noms.",
"Please provide a path or url to build": "Veuillez fournir un chemin ou une URL à construire",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "Veuillez fournir une image dans votre démon local à charger dans minikube via \u003cminikube image load IMAGE_NAME\u003e",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "Veuillez réévaluer votre docker-env, pour vous assurer que vos variables d'environnement ont des ports mis à jour :\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "Veuillez réévaluer votre podman-env, pour vous assurer que vos variables d'environnement ont des ports mis à jour :\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t",
"Please see {{.documentation_url}} for more details": "Veuillez consulter {{.documentation_url}} pour plus de détails",
@ -461,9 +469,11 @@
"Profile name '{{.profilename}}' is not valid": "Le nom de profil '{{.profilename}}' n'est pas valide",
"Profile name should be unique": "Le nom du profil doit être unique",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "Fournit l'identifiant unique universel (UUID) de la VM pour restaurer l'adresse MAC (pilote hyperkit uniquement).",
"Pull images": "",
"Pull the remote image (no caching)": "Extraire l'image distante (pas de mise en cache)",
"Pulling base image ...": "Extraction de l'image de base...",
"Pulling images ...": "Extraction des images... ",
"Push images": "",
"Push the new image (requires tag)": "Pousser la nouvelle image (nécessite une balise)",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "Redémarrez pour terminer l'installation de VirtualBox, vérifiez que VirtualBox n'est pas bloqué par votre système et/ou utilisez un autre hyperviseur",
"Rebuild libvirt with virt-network support": "Reconstruire libvirt avec le support de virt-network",
@ -568,7 +578,7 @@
"Starts a node.": "Démarre un nœud.",
"Starts an existing stopped node in a cluster.": "Démarre un nœud arrêté existant dans un cluster.",
"Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}": "Échec du démarrage avec le pilote {{.old_driver}}, essai avec un autre pilote {{.new_driver}} : {{.error}}",
"Stopped tunnel for service {{.service}}.": "",
"Stopped tunnel for service {{.service}}.": "Tunnel arrêté pour le service {{.service}}.",
"Stopping \"{{.profile_name}}\" in {{.driver_name}} ...": "Arrêt de \"{{.profile_name}}\" sur {{.driver_name}}...",
"Stopping node \"{{.name}}\" ...": "Nœud d'arrêt \"{{.name}}\" ...",
"Stopping tunnel for service {{.service}}.": "Tunnel d'arrêt pour le service {{.service}}.",
@ -583,6 +593,7 @@
"Successfully stopped node {{.name}}": "Nœud {{.name}} arrêté avec succès",
"Suggestion: {{.advice}}": "Suggestion : {{.advice}}",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "Le système n'a que {{.size}} Mio disponibles, moins que les {{.req}} Mio requis pour Kubernetes",
"Tag images": "",
"Tag to apply to the new image (optional)": "Tag à appliquer à la nouvelle image (facultatif)",
"Target directory {{.path}} must be an absolute path": "Le répertoire cible {{.path}} doit être un chemin absolu",
"Target {{.path}} can not be empty": "La cible {{.path}} ne peut pas être vide",
@ -666,6 +677,7 @@
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "L'allocation de mémoire demandée de {{.requested}}MiB ne laisse pas de place pour la surcharge système (mémoire système totale : {{.system_limit}}MiB). Vous pouvez rencontrer des problèmes de stabilité.",
"The service namespace": "L'espace de nom du service",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "Le service {{.service}} nécessite l'exposition des ports privilégiés : {{.ports}}",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "Le service/ingress {{.resource}} nécessite l'exposition des ports privilégiés : {{.ports}}",
"The services namespace": "L'espace de noms des services",
"The time interval for each check that wait performs in seconds": "L'intervalle de temps pour chaque contrôle que wait effectue en secondes",
"The value passed to --format is invalid": "La valeur passée à --format n'est pas valide",
@ -858,11 +870,13 @@
"error provisioning host": "erreur lors de l'approvisionnement de l'hôte",
"error starting tunnel": "erreur de démarrage du tunnel",
"error stopping tunnel": "erreur d'arrêt du tunnel",
"error: --output must be 'text', 'yaml' or 'json'": "erreur : --output doit être 'text', 'yaml' ou 'json'",
"error: --output must be 'yaml' or 'json'": "erreur : --output doit être 'yaml' ou 'json'",
"experimental": "expérimental",
"failed to add node": "échec de l'ajout du nœud",
"failed to open browser: {{.error}}": "échec de l'ouverture du navigateur : {{.error}}",
"failed to save config": "échec de l'enregistrement de la configuration",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "échec du démarrage du nœud",
"fish completion failed": "la complétion fish a échoué",
"fish completion.": "complétion fish.",

View File

@ -19,6 +19,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -68,6 +69,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "",
"Build a container image, using the container runtime.": "",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -148,7 +151,6 @@
"Downloading Kubernetes {{.version}} preload ...": "Kubernetes {{.version}} のダウンロードの準備をしています",
"Downloading VM boot image ...": "VM ブートイメージをダウンロードしています...",
"Downloading driver {{.driver}}:": "{{.driver}} ドライバをダウンロードしています:",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "`registry-creds-acr` シークレット作成中にエラーが発生しました",
"ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` シークレット作成中にエラーが発生しました",
@ -242,6 +244,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
@ -252,6 +256,7 @@
"Failed to start container runtime": "",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "",
"Failed to tag images": "",
"Failed to update cluster": "",
"Failed to update config": "",
"Failed unmount: {{.error}}": "",
@ -396,6 +401,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "OS は {{.pretty_name}} です。",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -428,6 +434,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "",
@ -453,8 +460,10 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "MAC アドレスを復元するための VM UUID を指定しますhyperkit ドライバのみ)",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "イメージを Pull しています...",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
"Rebuild libvirt with virt-network support": "",
@ -576,6 +585,7 @@
"Suggestion: {{.advice}}": "提案: {{.advice}}",
"Suggestion: {{.fix}}": "提案: {{.fix}}",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "",
"Target {{.path}} can not be empty": "",
@ -659,7 +669,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -854,11 +864,13 @@
"error provisioning guest": "",
"error starting tunnel": "tunnel を開始する際にエラーが発生しました",
"error stopping tunnel": "tunnel を停止する際にエラーが発生しました",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "エラーです。 --output は「 yaml 」、あるいは「 json 」である必要があります",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "ブラウザを起動するのに失敗しました。 {{.error}}",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -23,6 +23,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "--kvm-numa-count 범위는 1부터 8입니다",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -73,6 +74,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "minikube 내 컨테이너 이미지를 빌드합니다",
"Build a container image, using the container runtime.": "컨테이너 런타임을 사용하여 컨테이너 이미지를 빌드합니다.",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "도커 데몬의 캐시 이미지",
"Cache image from remote registry": "원격 레지스트리의 캐시 이미지",
@ -160,7 +163,6 @@
"Downloading VM boot image ...": "가상 머신 부트 이미지 다운로드 중 ...",
"Downloading driver {{.driver}}:": "드라이버 {{.driver}} 다운로드 중 :",
"Downloading {{.name}} {{.version}}": "{{.name}} {{.version}} 다운로드 중",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "registry-creds-acr` secret 생성 오류",
"ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` secret 생성 오류",
@ -269,6 +271,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "캐시된 이미지를 다시 불러오는 데 실패하였습니다",
"Failed to remove image": "",
"Failed to save config": "컨피그 저장에 실패하였습니다",
@ -282,6 +286,7 @@
"Failed to start node {{.name}}": "노드 {{.name}} 시작에 실패하였습니다",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "노드 {{.name}} 중지에 실패하였습니다",
"Failed to tag images": "",
"Failed to update cluster": "클러스터를 수정하는 데 실패하였습니다",
"Failed to update config": "컨피그를 수정하는 데 실패하였습니다",
"Failed unmount: {{.error}}": "마운트 해제에 실패하였습니다: {{.error}}",
@ -421,6 +426,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -452,6 +458,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "",
@ -476,8 +483,10 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "베이스 이미지를 다운받는 중 ...",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
"Rebuild libvirt with virt-network support": "",
@ -598,6 +607,7 @@
"Successfully stopped node {{.name}}": "{{.name}} 노드가 정상적으로 중지되었습니다",
"Suggestion: {{.advice}}": "권장: {{.advice}}",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "타겟 폴더 {{.path}} 는 절대 경로여야 합니다",
"Target {{.path}} can not be empty": "",
@ -670,7 +680,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -856,11 +866,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -22,6 +22,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "==\u003e Audyt \u003c==",
"==\u003e Last Start \u003c==": "==\u003e Ostatni start \u003c==",
@ -70,6 +71,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "Zbuduj obraz kontenera w minikube",
"Build a container image, using the container runtime.": "Zbuduj obraz kontenera używając środowiska uruchomieniowego kontenera",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -160,7 +163,6 @@
"Downloading VM boot image ...": "Pobieranie obrazu maszyny wirtualnej ...",
"Downloading driver {{.driver}}:": "",
"Downloading {{.name}} {{.version}}": "Pobieranie {{.name}} {{.version}}",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "",
"ERROR creating `registry-creds-dpr` secret": "",
@ -256,6 +258,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to remove profile": "Usunięcie profilu nie powiodło się",
@ -269,6 +273,7 @@
"Failed to start container runtime": "",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "",
"Failed to tag images": "",
"Failed to update cluster": "Aktualizacja klastra nie powiodła się",
"Failed to update config": "Aktualizacja konfiguracji nie powiodła się",
"Failed unmount: {{.error}}": "",
@ -413,6 +418,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "Wersja systemu operacyjnego to {{.pretty_name}}",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "Jeden z dwóćh formatów - 'yaml' lub 'json'",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "Tylko znaki alfanumeryczne oraz myślniki '-' są dozwolone. Co najmniej jeden znak, zaczynając od znaku alfanumerycznego",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "Tylko znaki alfanumeryczne oraz myślniki '-' są dozwolone. Co najmniej dwa znaki, zaczynając od znaku alfanumerycznego",
@ -445,6 +451,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "Proszę upewnij się, że serwis którego szukasz znajduje się w prawidłowej przestrzeni nazw",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "Zobacz {{.documentation_url}} żeby uzyskać więcej informacji",
@ -471,8 +478,10 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "Uruchom ponownie komputer aby zakończyć instalację VirtualBox'a i upewnij się, że nie jest on blokowany przez twój system",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
@ -597,6 +606,7 @@
"Successfully stopped node {{.name}}": "",
"Suggestion: {{.advice}}": "Sugestia: {{.advice}}",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "",
"Target {{.path}} can not be empty": "",
@ -678,7 +688,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "Wartość przekazana do --format jest nieprawidłowa",
@ -862,11 +872,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "Nie udało się otworzyć przeglądarki: {{.error}}",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -17,6 +17,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -64,6 +65,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "",
"Build a container image, using the container runtime.": "",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -143,7 +146,6 @@
"Downloading Kubernetes {{.version}} preload ...": "",
"Downloading VM boot image ...": "",
"Downloading driver {{.driver}}:": "",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "",
"ERROR creating `registry-creds-dpr` secret": "",
@ -234,6 +236,7 @@
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
@ -377,6 +380,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -436,6 +440,7 @@
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "",
"Rebuild libvirt with virt-network support": "",
@ -622,7 +627,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -796,11 +801,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",

View File

@ -24,6 +24,7 @@
"- {{.logPath}}": "",
"--kvm-numa-count range is 1-8": "",
"--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "",
"127.0.0.1": "",
"\u003ctarget file absolute path\u003e must be an absolute Path. Relative Path is not allowed (example: \"/home/docker/copied.txt\")": "",
"==\u003e Audit \u003c==": "",
"==\u003e Last Start \u003c==": "",
@ -84,6 +85,8 @@
"Bridge CNI is incompatible with multi-node clusters, use a different CNI": "",
"Build a container image in minikube": "",
"Build a container image, using the container runtime.": "",
"CGroup allocation is not available in your environment, You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CGroup allocation is not available in your environment. You might be running minikube in a nested container. Try running:\n\t\t\t\n\tminikube start --extra-config=kubelet.cgroups-per-qos=false --extra-config=kubelet.enforce-node-allocatable=\"\"\n\n\t\t\t\n\t\t\t": "",
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
@ -185,7 +188,6 @@
"Downloading VM boot image ...": "正在下载 VM boot image...",
"Downloading driver {{.driver}}:": "正在下载驱动 {{.driver}}:",
"Downloading {{.name}} {{.version}}": "正在下载 {{.name}} {{.version}}",
"Due to networking limitations of driver {{.driver_name}} on {{.os_name}}, {{.addon_name}} addon is not supported.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "",
"Due to networking limitations of driver {{.driver_name}}, {{.addon_name}} addon is not fully supported. Try using a different driver.": "",
"ERROR creating `registry-creds-acr` secret": "",
"ERROR creating `registry-creds-dpr` secret": "创建 `registry-creds-dpr` secret 时出错",
@ -319,6 +321,8 @@
"Failed to load image": "",
"Failed to persist images": "",
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to reload cached images": "重新加载缓存镜像失败",
"Failed to remove image": "",
"Failed to remove profile": "无法删除配置文件",
@ -333,6 +337,7 @@
"Failed to start container runtime": "",
"Failed to start {{.driver}} {{.driver_type}}. Running \"{{.cmd}}\" may fix it: {{.error}}": "",
"Failed to stop node {{.name}}": "",
"Failed to tag images": "",
"Failed to update cluster": "更新 cluster 失败",
"Failed to update config": "更新 config 失败",
"Failed unmount: {{.error}}": "unmount 失败:{{.error}}",
@ -486,6 +491,7 @@
"Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit driver)": "",
"Number of lines back to go within the log": "",
"OS release is {{.pretty_name}}": "",
"One of 'text', 'yaml' or 'json'.": "",
"One of 'yaml' or 'json'.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.": "",
"Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.": "",
@ -519,6 +525,7 @@
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
"Please re-eval your podman-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} podman-env'\n\n\t": "",
"Please see {{.documentation_url}} for more details": "",
@ -546,9 +553,11 @@
"Profile name '{{.profilename}}' is not valid": "",
"Profile name should be unique": "",
"Provide VM UUID to restore MAC address (hyperkit driver only)": "提供虚拟机 UUID 以恢复 MAC 地址(仅限 hyperkit 驱动程序)",
"Pull images": "",
"Pull the remote image (no caching)": "",
"Pulling base image ...": "",
"Pulling images ...": "拉取镜像 ...",
"Push images": "",
"Push the new image (requires tag)": "",
"Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "重启以完成 VirtualBox 安装,检查 VirtualBox 未被您的操作系统禁用,或者使用其他的管理程序。",
"Rebuild libvirt with virt-network support": "",
@ -681,6 +690,7 @@
"Suggestion: {{.advice}}": "建议:{{.advice}}",
"Suggestion: {{.fix}}": "建议:{{.fix}}",
"System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes": "",
"Tag images": "",
"Tag to apply to the new image (optional)": "",
"Target directory {{.path}} must be an absolute path": "",
"Target {{.path}} can not be empty": "",
@ -766,7 +776,7 @@
"The podman-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "",
"The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.": "",
"The service namespace": "",
"The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "",
"The service/ingress {{.resource}} requires privileged ports to be exposed: {{.ports}}": "",
"The services namespace": "",
"The time interval for each check that wait performs in seconds": "",
"The value passed to --format is invalid": "",
@ -968,11 +978,13 @@
"error provisioning guest": "",
"error starting tunnel": "",
"error stopping tunnel": "",
"error: --output must be 'text', 'yaml' or 'json'": "",
"error: --output must be 'yaml' or 'json'": "",
"experimental": "",
"failed to add node": "",
"failed to open browser: {{.error}}": "",
"failed to save config": "",
"failed to set cloud shell kubelet config options": "",
"failed to start node": "",
"fish completion failed": "",
"fish completion.": "",