diff --git a/.github/ISSUE_TEMPLATE/__en-US.md b/.github/ISSUE_TEMPLATE/__en-US.md index 261f435c2d..0e78dda079 100644 --- a/.github/ISSUE_TEMPLATE/__en-US.md +++ b/.github/ISSUE_TEMPLATE/__en-US.md @@ -2,21 +2,24 @@ name: English about: Report an issue --- - + +**Steps to reproduce the issue:** -**The exact command to reproduce the issue**: +1. +2. +3. + + +**Full output of failed command:** -**The full output of the command that failed**:
+**Full output of `minikube start` command used, if not already included:** + + + +**Optional: Full output of `minikube logs` command:** +
- -**The output of the `minikube logs` command**:
- - - -
- -**The operating system version**: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a3572ed151..68a9561d78 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -19,6 +19,8 @@ jobs: run : | make minikube-linux-amd64 make e2e-linux-amd64 + make minikube-windows-amd64.exe + make e2e-windows-amd64.exe cp -r test/integration/testdata ./out whoami echo github ref $GITHUB_REF @@ -81,24 +83,43 @@ jobs: GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 - steps: + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 with: name: minikube_binaries - name: Run Integration Test - continue-on-error: true + continue-on-error: false # bash {0} to allow test to continue to next step. 
in case of shell: bash {0} run: | @@ -108,7 +129,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -140,6 +161,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi docker_ubuntu_18_04: runs-on: ubuntu-18.04 @@ -150,16 +173,35 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install lz4 + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install liblz4-tool - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -176,7 +218,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -208,7 +250,73 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + docker_on_windows: + needs: [build_minikube] + env: + TIME_ELAPSED: time + JOB_NAME: "Docker_on_windows" + COMMIT_STATUS: "" + runs-on: windows-latest + steps: + - uses: actions/checkout@v2 + - name: Docker Info + shell: bash + run: | + docker info 
|| true + docker version || true + docker ps || true + - name: Download gopogh + run: | + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe + shell: bash + - name: Download binaries + uses: actions/download-artifact@v1 + with: + name: minikube_binaries + - name: run integration test + continue-on-error: true + run: | + set +euo pipefail + mkdir -p report + mkdir -p testhome + START_TIME=$(date -u +%s) + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt + END_TIME=$(date -u +%s) + TIME_ELAPSED=$(($END_TIME-$START_TIME)) + min=$((${TIME_ELAPSED}/60)) + sec=$((${TIME_ELAPSED}%60)) + TIME_ELAPSED="${min} min $sec seconds" + echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED} + shell: bash + - name: Generate html report + run: | + go tool test2json -t < ./report/testout.txt > ./report/testout.json || true + STAT=$(${GITHUB_WORKSPACE}/gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true + echo status: ${STAT} + FailNum=$(echo $STAT | jq '.NumberOfFail') + TestsNum=$(echo $STAT | jq '.NumberOfTests') + GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}" + echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT} + echo ::set-env name=STAT::${STAT} + shell: bash + - uses: actions/upload-artifact@v1 + with: + name: docker_on_windows + path: report + - name: The End Result + run: | + echo ${GOPOGH_RESULT} + numFail=$(echo $STAT | jq '.NumberOfFail') + echo "----------------${numFail} Failures----------------------------" + echo $STAT | jq '.FailedTests' || true + echo "--------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" + if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi + shell: bash none_ubuntu16_04: needs: [build_minikube] env: @@ -218,10 +326,27 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + # conntrack is required for kubernetes 1.18 and higher + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -238,7 +363,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig 
MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -270,6 +395,8 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi none_ubuntu18_04: needs: [build_minikube] @@ -280,10 +407,27 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + # conntrack is required for kubernetes 1.18 and higher + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none + shell: bash + run: | + sudo apt-get update -qq + sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat + VERSION="v1.17.0" + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz + sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download Binaries uses: actions/download-artifact@v1 @@ -300,7 +444,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -332,8 +476,10 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi - podman_ubuntu_18_04: + podman_ubuntu_18_04_experimental: needs: [build_minikube] env: TIME_ELAPSED: time @@ -342,7 +488,13 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install podman + - name: Install kubectl + shell: bash + run: | + curl -LO 
https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl + kubectl version --client=true + - name: Install podman shell: bash run: | . /etc/os-release @@ -356,7 +508,7 @@ jobs: - name: Install gopogh shell: bash run: | - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 sudo install gopogh-linux-amd64 /usr/local/bin/gopogh - name: Download binaries uses: actions/download-artifact@v1 @@ -373,7 +525,7 @@ jobs: chmod a+x e2e-* chmod a+x minikube-* START_TIME=$(date -u +%s) - KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt + KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=30m -test.v -timeout-multiplier=1 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt END_TIME=$(date -u +%s) TIME_ELAPSED=$(($END_TIME-$START_TIME)) min=$((${TIME_ELAPSED}/60)) @@ -405,12 +557,14 @@ jobs: echo "----------------${numFail} Failures----------------------------" echo $STAT | jq '.FailedTests' || true echo "-------------------------------------------------------" + numPass=$(echo $STAT | jq '.NumberOfPass') + echo "*** $numPass Passed ***" if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi # After all 4 integration tests finished # collect all the reports and upload upload_all_reports: if: always() - needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04] + needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04_experimental] runs-on: ubuntu-18.04 steps: - name: Download Results docker_ubuntu_16_04 @@ -433,6 +587,15 @@ jobs: run: | mkdir -p all_reports cp -r docker_ubuntu_18_04 ./all_reports/ + - name: download results docker_on_windows + uses: actions/download-artifact@v1 + with: + name: docker_on_windows + - name: cp to all_report + shell: bash + run: | + mkdir -p all_reports + cp -r docker_on_windows ./all_reports/ - name: Download Results none_ubuntu16_04 uses: actions/download-artifact@v1 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index a6d0871151..11bcc13997 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,170 @@ # Release Notes +## Version 1.9.2 - 2020-04-04 + +Minor improvements: + +* UX: Remove noisy debug statement [#7407](https://github.com/kubernetes/minikube/pull/7407) +* Feature: Make --wait more flexible [#7375](https://github.com/kubernetes/minikube/pull/7375) +* Docker: adjust warn if slow for ps and volume [#7410](https://github.com/kubernetes/minikube/pull/7410) +* Localization: Update Japanese translations [#7403](https://github.com/kubernetes/minikube/pull/7403) +* Performance: Parallelize updating cluster and setting up certs [#7394](https://github.com/kubernetes/minikube/pull/7394) +* Addons: allow ingress addon for docker/podman drivers only on linux for now [#7393](https://github.com/kubernetes/minikube/pull/7393) + +- Anders F Björklund +- Medya Ghazizadeh +- Prasad Katti +- Priya Wadhwa +- Thomas Strömberg +- tomocy + +## Version 1.9.1 - 2020-04-02 + +Improvements: + +* add delete-on-failure flag [#7345](https://github.com/kubernetes/minikube/pull/7345) +* Run 
dashboard with internal kubectl if not in path [#7299](https://github.com/kubernetes/minikube/pull/7299) +* Implement options for the minikube version command [#7325](https://github.com/kubernetes/minikube/pull/7325) +* service list cmd: display target port and name [#6879](https://github.com/kubernetes/minikube/pull/6879) +* Add rejection reason to 'unable to find driver' error [#7379](https://github.com/kubernetes/minikube/pull/7379) +* Update Japanese translations [#7359](https://github.com/kubernetes/minikube/pull/7359) + +Bug fixes: + +* Make eviction and image GC settings consistent across kubeadm API versions [#7364](https://github.com/kubernetes/minikube/pull/7364) +* Move errors and warnings to output to stderr [#7382](https://github.com/kubernetes/minikube/pull/7382) +* Correct assumptions for forwarded hostname & IP handling [#7360](https://github.com/kubernetes/minikube/pull/7360) +* Extend maximum stop retry from 30s to 120s [#7363](https://github.com/kubernetes/minikube/pull/7363) +* Use kubectl version --short if --output=json fails [#7356](https://github.com/kubernetes/minikube/pull/7356) +* Fix embed certs by updating kubeconfig after certs are populated [#7309](https://github.com/kubernetes/minikube/pull/7309) +* none: Use LookPath to verify conntrack install [#7305](https://github.com/kubernetes/minikube/pull/7305) +* Show all global flags in options command [#7292](https://github.com/kubernetes/minikube/pull/7292) +* Fix null deref in start host err [#7278](https://github.com/kubernetes/minikube/pull/7278) +* Increase Docker "slow" timeouts to 15s [#7268](https://github.com/kubernetes/minikube/pull/7268) +* none: check for docker and root uid [#7388](https://github.com/kubernetes/minikube/pull/7388) + +Thank you to our contributors for this release! 
+ +- Anders F Björklund +- Dan Lorenc +- Eberhard Wolff +- John Laswell +- Marcin Niemira +- Medya Ghazizadeh +- Prasad Katti +- Priya Wadhwa +- Sharif Elgamal +- Thomas Strömberg +- Vincent Link +- anencore94 +- priyawadhwa +- re;i +- tomocy + +## Version 1.9.0 - 2020-03-26 + +New features & improvements + +* Update DefaultKubernetesVersion to v1.18.0 [#7235](https://github.com/kubernetes/minikube/pull/7235) +* Add --vm flag for users who want to autoselect only VM's [#7068](https://github.com/kubernetes/minikube/pull/7068) +* Add 'stable' and 'latest' as valid kubernetes-version values [#7212](https://github.com/kubernetes/minikube/pull/7212) + +* gpu addon: privileged mode no longer required [#7149](https://github.com/kubernetes/minikube/pull/7149) +* Add sch_tbf and extend filter ipset kernel module for bandwidth shaping [#7255](https://github.com/kubernetes/minikube/pull/7255) +* Parse --disk-size and --memory sizes with binary suffixes [#7206](https://github.com/kubernetes/minikube/pull/7206) + + +Bug Fixes + +* Re-initalize failed Kubernetes clusters [#7234](https://github.com/kubernetes/minikube/pull/7234) +* do not override hostname if extraConfig is specified [#7238](https://github.com/kubernetes/minikube/pull/7238) +* Enable HW_RANDOM_VIRTIO to fix sshd startup delays [#7208](https://github.com/kubernetes/minikube/pull/7208) +* hyperv Delete: call StopHost before removing VM [#7160](https://github.com/kubernetes/minikube/pull/7160) + +Huge thank you for this release towards our contributors: + +- Anders F Björklund +- Medya Ghazizadeh +- Priya Wadhwa +- Sharif Elgamal +- Thomas Strömberg +- Tom +- Vincent Link +- Yang Keao +- Zhongcheng Lao +- vikkyomkar + + +## Version 1.9.0-beta.2 - 2020-03-21 + +New features & improvements + +* 🎉 Experimental multi-node support 🎊 [#6787](https://github.com/kubernetes/minikube/pull/6787) +* Add kubectl desc nodes to minikube logs [#7105](https://github.com/kubernetes/minikube/pull/7105) +* bumpup helm-tiller v2.16.1 → v2.16.3 [#7130](https://github.com/kubernetes/minikube/pull/7130) +* Update Nvidia GPU plugin [#7132](https://github.com/kubernetes/minikube/pull/7132) +* bumpup istio & istio-provisoner addon 1.4.0 → 1.5.0 [#7120](https://github.com/kubernetes/minikube/pull/7120) +* New addon: registry-aliases [#6657](https://github.com/kubernetes/minikube/pull/6657) +* Upgrade buildroot minor version [#7101](https://github.com/kubernetes/minikube/pull/7101) +* Skip kubeadm if cluster is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124) +* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125) + +Bugfixes + +* Prevent minikube from crashing if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844) +* Add warning if both vm-driver and driver are specified [#7109](https://github.com/kubernetes/minikube/pull/7109) +* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112) +* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115) + +Thank you to our contributors: + +- Anders F Björklund +- Iso Kenta +- Kamesh Sampath +- Kenta Iso +- Prasad Katti +- Priya Wadhwa +- Sharif Elgamal +- Tacio Costa +- Thomas Strömberg +- Zhongcheng Lao +- rajula96reddy +- sayboras + +## Version 1.9.0-beta.1 - 2020-03-18 + +New features + +* Use Kubernetes v1.18.0-rc.1 by default 
[#7076](https://github.com/kubernetes/minikube/pull/7076) +* Upgrade Docker driver to preferred (Linux), default on other platforms [#7090](https://github.com/kubernetes/minikube/pull/7090) +* Upgrade Docker, from 19.03.7 to 19.03.8 [#7040](https://github.com/kubernetes/minikube/pull/7040) +* Upgrade Docker, from 19.03.6 to 19.03.7 [#6939](https://github.com/kubernetes/minikube/pull/6939) +* Upgrade dashboard to v2.0.0-rc6 [#7098](https://github.com/kubernetes/minikube/pull/7098) +* Upgrade crio to 1.17.1 [#7099](https://github.com/kubernetes/minikube/pull/7099) +* Updated French translation [#7055](https://github.com/kubernetes/minikube/pull/7055) + +Bugfixes + +* If user doesn't specify driver, don't validate against existing cluster [#7096](https://github.com/kubernetes/minikube/pull/7096) +* Strip the version prefix before calling semver [#7054](https://github.com/kubernetes/minikube/pull/7054) +* Move some of the driver validation before driver selection [#7080](https://github.com/kubernetes/minikube/pull/7080) +* Fix bug where global config memory was ignored [#7082](https://github.com/kubernetes/minikube/pull/7082) +* Remove controllerManager from the kubeadm v1beta2 template [#7030](https://github.com/kubernetes/minikube/pull/7030) +* Delete: output underlying status failure [#7043](https://github.com/kubernetes/minikube/pull/7043) +* status: error properly if cluster does not exist [#7041](https://github.com/kubernetes/minikube/pull/7041) + +Huge thank you for this release towards our contributors: + +- Anders F Björklund +- Medya Ghazizadeh +- Priya Wadhwa +- RA489 +- Richard Wall +- Sharif Elgamal +- Thomas Strömberg +- Vikky Omkar +- jumahmohammad + ## Version 1.8.2 - 2020-03-13 Shiny new improvements: diff --git a/Makefile b/Makefile index bd54cff886..e0fe592f0a 100755 --- a/Makefile +++ b/Makefile @@ -14,15 +14,13 @@ # Bump these on release - and please check ISO_VERSION for correctness. VERSION_MAJOR ?= 1 -VERSION_MINOR ?= 8 +VERSION_MINOR ?= 9 VERSION_BUILD ?= 2 -RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD} +RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION ?= v$(RAW_VERSION) KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/constants/constants.go | cut -d \" -f2) KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2) -PRELOADED_TARBALL_VERSION ?= $(shell egrep "PreloadVersion =" pkg/minikube/download/preload.go | cut -d \" -f2) -PRELOADED_VOLUMES_GCS_BUCKET ?= $(shell egrep "PreloadBucket =" pkg/minikube/download/preload.go | cut -d \" -f2) # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0 @@ -34,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION) GO_VERSION ?= 1.13.8 INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1) -BUILDROOT_BRANCH ?= 2019.02.9 +BUILDROOT_BRANCH ?= 2019.02.10 REGISTRY?=gcr.io/k8s-minikube # Get git commit id @@ -54,7 +52,7 @@ MINIKUBE_BUCKET ?= minikube/releases MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET} MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download -KERNEL_VERSION ?= 4.19.94 +KERNEL_VERSION ?= 4.19.107 # latest from https://github.com/golangci/golangci-lint/releases GOLINT_VERSION ?= v1.23.6 # Limit number of default jobs, to avoid the CI builds running out of memory @@ -113,7 +111,7 @@ MINIKUBE_TEST_FILES := ./cmd/... ./pkg/... 
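The CI jobs earlier in this diff all gate on the gopogh summary the same way: `jq '.NumberOfFail'`, the newly added `jq '.NumberOfPass'`, and `jq '.FailedTests'`, exiting 2 when anything failed. The same gate expressed in Go, assuming only the three field names those jq filters use — the rest of gopogh's JSON schema is not shown in this diff, so `FailedTests` is kept opaque, and the file path is illustrative (the workflow captures the summary from gopogh's stdout into `$STAT` rather than a file):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

// summary mirrors only the fields the workflow reads from gopogh's output.
type summary struct {
	NumberOfFail int             `json:"NumberOfFail"`
	NumberOfPass int             `json:"NumberOfPass"`
	FailedTests  json.RawMessage `json:"FailedTests"` // shape not shown in the diff, kept opaque
}

func main() {
	data, err := ioutil.ReadFile("gopogh-summary.json") // illustrative path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var s summary
	if err := json.Unmarshal(data, &s); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("*** %d Passed ***\n", s.NumberOfPass)
	if s.NumberOfFail > 0 {
		fmt.Printf("*** %d Failed ***\n%s\n", s.NumberOfFail, s.FailedTests)
		os.Exit(2)
	}
}
```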
MARKDOWNLINT ?= markdownlint -MINIKUBE_MARKDOWN_FILES := README.md docs CONTRIBUTING.md CHANGELOG.md +MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md MINIKUBE_BUILD_TAGS := container_image_ostree_stub containers_image_openpgp MINIKUBE_BUILD_TAGS += go_getter_nos3 go_getter_nogcs @@ -270,11 +268,11 @@ integration-versioned: out/minikube ## Trigger minikube integration testing .PHONY: test test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger minikube test - ./test.sh + MINIKUBE_LDFLAGS="${MINIKUBE_LDFLAGS}" ./test.sh -.PHONY: gotest -gotest: $(SOURCE_GENERATED) ## Trigger minikube test - go test -tags "$(MINIKUBE_BUILD_TAGS)" $(MINIKUBE_TEST_FILES) +.PHONY: generate-docs +generate-docs: out/minikube ## Automatically generate commands documentation. + out/minikube generate-docs --path ./site/content/en/docs/commands/ .PHONY: extract extract: ## Compile extract tool @@ -526,14 +524,8 @@ kic-base-image: ## builds the base image used for kic. docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base . .PHONY: upload-preloaded-images-tar -upload-preloaded-images-tar: generate-preloaded-images-tar # Upload the preloaded images tar to the GCS bucket. Specify a specific kubernetes version to build via `KUBERNETES_VERSION=vx.y.z make upload-preloaded-images-tar`. - gsutil cp out/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4 gs://${PRELOADED_VOLUMES_GCS_BUCKET} - gsutil acl ch -u AllUsers:R gs://${PRELOADED_VOLUMES_GCS_BUCKET}/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4 - -.PHONY: generate-preloaded-images-tar -generate-preloaded-images-tar: - go run ./hack/preload-images/preload_images.go -kubernetes-version ${KUBERNETES_VERSION} -preloaded-tarball-version ${PRELOADED_TARBALL_VERSION} - +upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS. + go run ./hack/preload-images/*.go .PHONY: push-storage-provisioner-image push-storage-provisioner-image: storage-provisioner-image ## Push storage-provisioner docker image using gcloud @@ -633,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/ gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/ -site/themes/docsy/assets/vendor/bootstrap/package.js: +site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule git submodule update -f --init --recursive out/hugo/hugo: diff --git a/README.md b/README.md index 75423b7474..3f002b3713 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ [![BuildStatus Widget]][BuildStatus Result] [![GoReport Widget]][GoReport Status] +[![Github All Releases](https://img.shields.io/github/downloads/kubernetes/minikube/total.svg)](https://github.com/kubernetes/minikube/releases/latest) +[![Latest Release](https://img.shields.io/github/v/release/kubernetes/minikube?include_prereleases)](https://github.com/kubernetes/minikube/releases/latest) + [BuildStatus Result]: https://travis-ci.org/kubernetes/minikube [BuildStatus Widget]: https://travis-ci.org/kubernetes/minikube.svg?branch=master @@ -13,7 +16,8 @@ minikube implements a local Kubernetes cluster on macOS, Linux, and Windows. 
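Two documentation-related changes meet here: the Makefile gains a `generate-docs` target that shells out to the freshly built binary, and (further down in this diff) `cmd/minikube/cmd/generate-docs.go` swaps cobra's stock `doc.GenMarkdownTree` for a project-specific `generate.Docs`. For orientation, a self-contained sketch of what the stock helper being replaced does — this is not minikube's `pkg/generate` implementation:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "minikube", Short: "minikube provisions and manages local Kubernetes clusters"}
	root.AddCommand(&cobra.Command{
		Use:   "generate-docs",
		Short: "Populate a folder with markdown documentation for every command",
		Run:   func(cmd *cobra.Command, args []string) {},
	})

	dir := "./site/content/en/docs/commands/"
	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Fatal(err)
	}
	// GenMarkdownTree writes one <command>.md file per (sub)command into dir.
	if err := doc.GenMarkdownTree(root, dir); err != nil {
		log.Fatal(err)
	}
}
```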
minikube's [primary goals](https://minikube.sigs.k8s.io/docs/concepts/principles/) are to be the best tool for local Kubernetes application development and to support all Kubernetes features that fit. -screenshot +screenshot + ## Features @@ -48,7 +52,7 @@ See https://minikube.sigs.k8s.io/docs/ ## More Examples -See our [examples page](https://minikube.sigs.k8s.io/docs/examples/) +See minikube in action [here](https://minikube.sigs.k8s.io/docs/handbook/controls/) ## Community @@ -59,6 +63,6 @@ minikube is a Kubernetes [#sig-cluster-lifecycle](https://github.com/kubernetes/ * [minikube-dev mailing list](https://groups.google.com/forum/#!forum/minikube-dev) * [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh) -* [Contributing](https://minikube.sigs.k8s.io/docs/contributing/) -* [Development Roadmap](https://minikube.sigs.k8s.io/docs/contributing/roadmap/) +* [Contributing](https://minikube.sigs.k8s.io/docs/contrib/) +* [Development Roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/) diff --git a/cmd/minikube/cmd/config/addons_list.go b/cmd/minikube/cmd/config/addons_list.go index 73c72ec37e..521182f226 100644 --- a/cmd/minikube/cmd/config/addons_list.go +++ b/cmd/minikube/cmd/config/addons_list.go @@ -26,10 +26,10 @@ import ( "github.com/golang/glog" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -50,11 +50,12 @@ var addonsListCmd = &cobra.Command{ exit.UsageT("usage: minikube addons list") } + _, cc := mustload.Partial(ClusterFlagValue()) switch strings.ToLower(addonListOutput) { case "list": - printAddonsList() + printAddonsList(cc) case "json": - printAddonsJSON() + printAddonsJSON(cc) default: exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'list', 'json'", addonListOutput)) } @@ -86,27 +87,24 @@ var stringFromStatus = func(addonStatus bool) string { return "disabled" } -var printAddonsList = func() { +var printAddonsList = func(cc *config.ClusterConfig) { addonNames := make([]string, 0, len(assets.Addons)) for addonName := range assets.Addons { addonNames = append(addonNames, addonName) } sort.Strings(addonNames) + var tData [][]string table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Addon Name", "Profile", "Status"}) table.SetAutoFormatHeaders(true) table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true}) table.SetCenterSeparator("|") - pName := viper.GetString(config.ProfileName) for _, addonName := range addonNames { addonBundle := assets.Addons[addonName] - addonStatus, err := addonBundle.IsEnabled(pName) - if err != nil { - out.WarningT("Unable to get addon status for {{.name}}: {{.error}}", out.V{"name": addonName, "error": err}) - } - tData = append(tData, []string{addonName, pName, fmt.Sprintf("%s %s", stringFromStatus(addonStatus), iconFromStatus(addonStatus))}) + enabled := addonBundle.IsEnabled(cc) + tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled))}) } table.AppendBulk(tData) @@ -121,9 +119,8 @@ var printAddonsList = func() { } } -var printAddonsJSON = func() { +var printAddonsJSON = func(cc *config.ClusterConfig) { addonNames := make([]string, 0, len(assets.Addons)) - pName := viper.GetString(config.ProfileName) for addonName := range assets.Addons { addonNames = append(addonNames, addonName) } @@ -133,16 +130,11 @@ var printAddonsJSON = func() { for _, addonName := range addonNames { addonBundle := assets.Addons[addonName] - - addonStatus, err := addonBundle.IsEnabled(pName) - if err != nil { - glog.Errorf("Unable to get addon status for %s: %v", addonName, err) - continue - } + enabled := addonBundle.IsEnabled(cc) addonsMap[addonName] = map[string]interface{}{ - "Status": stringFromStatus(addonStatus), - "Profile": pName, + "Status": stringFromStatus(enabled), + "Profile": cc.Name, } } jsonString, _ := json.Marshal(addonsMap) diff --git a/cmd/minikube/cmd/config/disable.go b/cmd/minikube/cmd/config/disable.go index af050c105e..a77f44092d 100644 --- a/cmd/minikube/cmd/config/disable.go +++ b/cmd/minikube/cmd/config/disable.go @@ -18,9 +18,7 @@ package config import ( "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" ) @@ -35,7 +33,7 @@ var addonsDisableCmd = &cobra.Command{ } addon := args[0] - err := addons.Set(addon, "false", viper.GetString(config.ProfileName)) + err := addons.SetAndSave(ClusterFlagValue(), addon, "false") if err != nil { exit.WithError("disable failed", err) } diff --git a/cmd/minikube/cmd/config/enable.go b/cmd/minikube/cmd/config/enable.go index 5f325a6eed..f05daadfd6 100644 --- a/cmd/minikube/cmd/config/enable.go +++ b/cmd/minikube/cmd/config/enable.go @@ -18,9 +18,7 @@ package config import ( "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/addons" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,7 +32,7 @@ var addonsEnableCmd = &cobra.Command{ exit.UsageT("usage: minikube addons enable ADDON_NAME") } addon := args[0] - err := addons.Set(addon, "true", viper.GetString(config.ProfileName)) + err := 
addons.SetAndSave(ClusterFlagValue(), addon, "true") if err != nil { exit.WithError("enable failed", err) } diff --git a/cmd/minikube/cmd/config/flags.go b/cmd/minikube/cmd/config/flags.go new file mode 100644 index 0000000000..5a978ab08b --- /dev/null +++ b/cmd/minikube/cmd/config/flags.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" +) + +// ClusterFlagValue returns the current cluster name based on flags +func ClusterFlagValue() string { + return viper.GetString(config.ProfileName) +} diff --git a/cmd/minikube/cmd/config/get.go b/cmd/minikube/cmd/config/get.go index 5c3f034ab7..f5a7899aa2 100644 --- a/cmd/minikube/cmd/config/get.go +++ b/cmd/minikube/cmd/config/get.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/out" ) @@ -59,5 +59,5 @@ func init() { // Get gets a property func Get(name string) (string, error) { - return pkgConfig.Get(name) + return config.Get(name) } diff --git a/cmd/minikube/cmd/config/open.go b/cmd/minikube/cmd/config/open.go index c4c8b5416d..b49dce72fe 100644 --- a/cmd/minikube/cmd/config/open.go +++ b/cmd/minikube/cmd/config/open.go @@ -18,19 +18,13 @@ package config import ( "fmt" - "os" "text/template" - "github.com/pkg/browser" - "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" ) @@ -62,36 +56,19 @@ var addonsOpenCmd = &cobra.Command{ exit.UsageT("usage: minikube addons open ADDON_NAME") } addonName := args[0] - // TODO(r2d4): config should not reference API, pull this out - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cc, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting cluster", err) - } - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting control plane", err) - } - if !machine.IsRunning(api, driver.MachineName(*cc, cp)) { - os.Exit(1) - } + cname := ClusterFlagValue() + co := mustload.Healthy(cname) + addon, ok := assets.Addons[addonName] // validate addon input if !ok { exit.WithCodeT(exit.Data, `addon '{{.name}}' is not a valid addon packaged with minikube. 
To see the list of available addons run: minikube addons list`, out.V{"name": addonName}) } - ok, err = addon.IsEnabled(profileName) - if err != nil { - exit.WithError("IsEnabled failed", err) - } - if !ok { + + enabled := addon.IsEnabled(co.Config) + if !enabled { exit.WithCodeT(exit.Unavailable, `addon '{{.name}}' is currently not enabled. To enable this addon run: minikube addons enable {{.name}}`, out.V{"name": addonName}) @@ -112,7 +89,7 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa svc := serviceList.Items[i].ObjectMeta.Name var urlString []string - if urlString, err = service.WaitForService(api, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { + if urlString, err = service.WaitForService(co.API, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil { exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err}) } diff --git a/cmd/minikube/cmd/config/profile.go b/cmd/minikube/cmd/config/profile.go index 31bd0bb0af..46afa5237a 100644 --- a/cmd/minikube/cmd/config/profile.go +++ b/cmd/minikube/cmd/config/profile.go @@ -20,11 +20,10 @@ import ( "os" "github.com/spf13/cobra" - "github.com/spf13/viper" - pkgConfig "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -35,7 +34,7 @@ var ProfileCmd = &cobra.Command{ Long: "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`", Run: func(cmd *cobra.Command, args []string) { if len(args) == 0 { - profile := viper.GetString(pkgConfig.ProfileName) + profile := ClusterFlagValue() out.T(out.Empty, profile) os.Exit(0) } @@ -49,9 +48,8 @@ var ProfileCmd = &cobra.Command{ we need to add code over here to check whether the profile name is in the list of reserved keywords */ - if pkgConfig.ProfileNameInReservedKeywords(profile) { - out.ErrT(out.FailureType, `Profile name "{{.profilename}}" is minikube keyword. To delete profile use command minikube delete -p `, out.V{"profilename": profile}) - os.Exit(0) + if config.ProfileNameInReservedKeywords(profile) { + exit.WithCodeT(exit.Config, `Profile name "{{.profilename}}" is reserved keyword. 
To delete this profile, run: "{{.cmd}}"`, out.V{"profilename": profile, "cmd": mustload.ExampleCmd(profile, "delete")}) } if profile == "default" { @@ -64,18 +62,18 @@ var ProfileCmd = &cobra.Command{ } } - if !pkgConfig.ProfileExists(profile) { + if !config.ProfileExists(profile) { out.ErrT(out.Tip, `if you want to create a profile you can by this command: minikube start -p {{.profile_name}}`, out.V{"profile_name": profile}) os.Exit(0) } - err := Set(pkgConfig.ProfileName, profile) + err := Set(config.ProfileName, profile) if err != nil { exit.WithError("Setting profile failed", err) } - cc, err := pkgConfig.Load(profile) + cc, err := config.Load(profile) // might err when loading older version of cfg file that doesn't have KeepContext field - if err != nil && !pkg_config.IsNotExist(err) { + if err != nil && !config.IsNotExist(err) { out.ErrT(out.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err}) } if err == nil { diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index d3b4bf1b9f..3a7cfa6069 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -91,13 +91,13 @@ var printProfilesTable = func() { table.Render() if invalidProfiles != nil { - out.T(out.Warning, "Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)}) + out.WarningT("Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)}) for _, p := range invalidProfiles { - out.T(out.Empty, "\t "+p.Name) + out.ErrT(out.Empty, "\t "+p.Name) } - out.T(out.Tip, "You can delete them using the following command(s): ") + out.ErrT(out.Tip, "You can delete them using the following command(s): ") for _, p := range invalidProfiles { - out.String(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name)) + out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name)) } } diff --git a/cmd/minikube/cmd/config/set.go b/cmd/minikube/cmd/config/set.go index 21f99863bf..b074af6aef 100644 --- a/cmd/minikube/cmd/config/set.go +++ b/cmd/minikube/cmd/config/set.go @@ -19,7 +19,7 @@ package config import ( "github.com/pkg/errors" "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" @@ -61,11 +61,11 @@ func Set(name string, value string) error { } // Set the value - config, err := pkgConfig.ReadConfig(localpath.ConfigFile()) + cc, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { return errors.Wrapf(err, "read config file %q", localpath.ConfigFile()) } - err = s.set(config, name, value) + err = s.set(cc, name, value) if err != nil { return errors.Wrapf(err, "set") } @@ -77,5 +77,5 @@ func Set(name string, value string) error { } // Write the value - return pkgConfig.WriteConfig(localpath.ConfigFile(), config) + return config.WriteConfig(localpath.ConfigFile(), cc) } diff --git a/cmd/minikube/cmd/config/unset.go b/cmd/minikube/cmd/config/unset.go index 1c68b53d9f..122d8ca828 100644 --- a/cmd/minikube/cmd/config/unset.go +++ b/cmd/minikube/cmd/config/unset.go @@ -18,7 +18,7 @@ package config import ( "github.com/spf13/cobra" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -44,10 +44,10 @@ func init() { // Unset unsets a property func Unset(name string) error { - m, err := 
pkgConfig.ReadConfig(localpath.ConfigFile()) + m, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { return err } delete(m, name) - return pkgConfig.WriteConfig(localpath.ConfigFile(), m) + return config.WriteConfig(localpath.ConfigFile(), m) } diff --git a/cmd/minikube/cmd/config/util_test.go b/cmd/minikube/cmd/config/util_test.go index b085ea965f..4347cb4073 100644 --- a/cmd/minikube/cmd/config/util_test.go +++ b/cmd/minikube/cmd/config/util_test.go @@ -20,11 +20,11 @@ import ( "fmt" "testing" - pkgConfig "k8s.io/minikube/pkg/minikube/config" + config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" ) -var minikubeConfig = pkgConfig.MinikubeConfig{ +var minikubeConfig = config.MinikubeConfig{ "driver": driver.KVM2, "cpus": 12, "show-libmachine-logs": true, @@ -83,21 +83,10 @@ func TestSetBool(t *testing.T) { } func TestValidateProfile(t *testing.T) { - testCases := []struct { - profileName string - }{ - { - profileName: "82374328742_2974224498", - }, - { - profileName: "validate_test", - }, - } - - for _, test := range testCases { - profileNam := test.profileName - expected := fmt.Sprintf("profile %q not found", test.profileName) - err, ok := ValidateProfile(profileNam) + testCases := []string{"82374328742_2974224498", "validate_test"} + for _, name := range testCases { + expected := fmt.Sprintf("profile %q not found", name) + err, ok := ValidateProfile(name) if !ok && err.Error() != expected { t.Errorf("got error %q, expected %q", err, expected) } diff --git a/cmd/minikube/cmd/config/validations.go b/cmd/minikube/cmd/config/validations.go index 8f57e13c45..071512daea 100644 --- a/cmd/minikube/cmd/config/validations.go +++ b/cmd/minikube/cmd/config/validations.go @@ -40,7 +40,7 @@ func IsValidDriver(string, name string) error { // RequiresRestartMsg returns the "requires restart" message func RequiresRestartMsg(string, string) error { - out.T(out.Warning, "These changes will take effect upon a minikube delete and then a minikube start") + out.WarningT("These changes will take effect upon a minikube delete and then a minikube start") return nil } diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index b5fd848eb3..51957e4b58 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -21,25 +21,20 @@ import ( "fmt" "io" "net/http" - "os" "os/exec" "os/user" "regexp" "time" - "github.com/docker/machine/libmachine/mcnerror" "github.com/golang/glog" - "github.com/pkg/browser" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" - pkgaddons "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/addons" "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" + + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/minikube/service" @@ -59,68 +54,27 @@ var dashboardCmd = &cobra.Command{ Short: "Access the kubernetes dashboard running within the minikube cluster", Long: `Access the kubernetes dashboard running within the minikube cluster`, Run: func(cmd *cobra.Command, args []string) { - profileName := viper.GetString(pkg_config.ProfileName) - cc, err := pkg_config.Load(profileName) - if err != nil && !pkg_config.IsNotExist(err) { - exit.WithError("Error loading profile config", err) - 
} + cname := ClusterFlagValue() + co := mustload.Healthy(cname) - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": profileName}) - os.Exit(1) - } - - api, err := machine.NewAPIClient() - defer func() { - err := api.Close() - if err != nil { - glog.Warningf("Failed to close API: %v", err) - } - }() - - if err != nil { - exit.WithError("Error getting client", err) - } - - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - - machineName := driver.MachineName(*cc, cp) - if _, err = api.Load(machineName); err != nil { - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.Unavailable, "{{.name}} cluster does not exist", out.V{"name": cc.Name}) - default: - exit.WithError("Error getting cluster", err) - } - } - - for _, n := range cc.Nodes { - err = proxy.ExcludeIP(n.IP) // to be used for http get calls - if err != nil { + for _, n := range co.Config.Nodes { + if err := proxy.ExcludeIP(n.IP); err != nil { glog.Errorf("Error excluding IP from proxy: %s", err) } } - kubectl, err := exec.LookPath("kubectl") - if err != nil { - exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/") - } - - if !machine.IsRunning(api, machineName) { - os.Exit(1) - } + kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion + var err error // Check dashboard status before enabling it - dashboardAddon := assets.Addons["dashboard"] - dashboardStatus, _ := dashboardAddon.IsEnabled(profileName) - if !dashboardStatus { + addon := assets.Addons["dashboard"] + enabled := addon.IsEnabled(co.Config) + + if !enabled { // Send status messages to stderr for folks re-using this output. out.ErrT(out.Enabling, "Enabling dashboard ...") // Enable the dashboard add-on - err = pkgaddons.Set("dashboard", "true", profileName) + err = addons.SetAndSave(cname, "dashboard", "true") if err != nil { exit.WithError("Unable to enable dashboard", err) } @@ -135,7 +89,7 @@ var dashboardCmd = &cobra.Command{ } out.ErrT(out.Launch, "Launching proxy ...") - p, hostPort, err := kubectlProxy(kubectl, machineName) + p, hostPort, err := kubectlProxy(kubectlVersion, cname) if err != nil { exit.WithError("kubectl proxy", err) } @@ -169,10 +123,17 @@ var dashboardCmd = &cobra.Command{ } // kubectlProxy runs "kubectl proxy", returning host:port -func kubectlProxy(path string, machineName string) (*exec.Cmd, string, error) { +func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) { // port=0 picks a random system port - cmd := exec.Command(path, "--context", machineName, "proxy", "--port=0") + kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"} + + var cmd *exec.Cmd + if kubectl, err := exec.LookPath("kubectl"); err == nil { + cmd = exec.Command(kubectl, kubectlArgs...) 
+ } else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil { + return nil, "", err + } stdoutPipe, err := cmd.StdoutPipe() if err != nil { diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 253f48128a..22c16062d3 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -141,10 +141,10 @@ func runDelete(cmd *cobra.Command, args []string) { exit.UsageT("usage: minikube delete") } - profileName := viper.GetString(config.ProfileName) - profile, err := config.LoadProfile(profileName) + cname := ClusterFlagValue() + profile, err := config.LoadProfile(cname) if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName}) + out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname}) } errs := DeleteProfiles([]*config.Profile{profile}) @@ -208,7 +208,13 @@ func deleteProfileContainersAndVolumes(name string) { func deleteProfile(profile *config.Profile) error { viper.Set(config.ProfileName, profile.Name) - deleteProfileContainersAndVolumes(profile.Name) + if profile.Config != nil { + // if driver is oci driver, delete containers and volumes + if driver.IsKIC(profile.Config.Driver) { + out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver}) + deleteProfileContainersAndVolumes(profile.Name) + } + } api, err := machine.NewAPIClient() if err != nil { @@ -236,7 +242,7 @@ func deleteProfile(profile *config.Profile) error { } if err := killMountProcess(); err != nil { - out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err}) + out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err}) } deleteHosts(api, cc) @@ -264,7 +270,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) { case mcnerror.ErrHostDoesNotExist: glog.Infof("Host %s does not exist. 
Proceeding ahead with cleanup.", machineName) default: - out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err}) + out.FailureT("Failed to delete cluster: {{.error}}", out.V{"error": err}) out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName}) } } @@ -272,13 +278,13 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) { } } -func deleteConfig(profileName string) error { - if err := config.DeleteProfile(profileName); err != nil { +func deleteConfig(cname string) error { + if err := config.DeleteProfile(cname); err != nil { if config.IsNotExist(err) { - delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName)) + delErr := profileDeletionErr(cname, fmt.Sprintf("\"%s\" profile does not exist", cname)) return DeletionError{Err: delErr, Errtype: MissingProfile} } - delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err)) + delErr := profileDeletionErr(cname, fmt.Sprintf("failed to remove profile %v", err)) return DeletionError{Err: delErr, Errtype: Fatal} } return nil @@ -317,8 +323,8 @@ func deleteInvalidProfile(profile *config.Profile) []error { return errs } -func profileDeletionErr(profileName string, additionalInfo string) error { - return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo) +func profileDeletionErr(cname string, additionalInfo string) error { + return fmt.Errorf("error deleting profile \"%s\": %s", cname, additionalInfo) } func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error { @@ -402,7 +408,7 @@ func deleteProfileDirectory(profile string) { out.T(out.DeletingHost, `Removing {{.directory}} ...`, out.V{"directory": machineDir}) err := os.RemoveAll(machineDir) if err != nil { - exit.WithError("Unable to remove machine directory: %v", err) + exit.WithError("Unable to remove machine directory", err) } } } diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index bf554f151c..c4432c6b58 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -27,19 +27,17 @@ import ( "strconv" "strings" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/state" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/shell" + "k8s.io/minikube/pkg/minikube/sysinit" ) var dockerEnvTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}", constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv) @@ -117,18 +115,8 @@ func (EnvNoProxyGetter) GetNoProxyVar() (string, string) { } // isDockerActive checks if Docker is active -func 
isDockerActive(d drivers.Driver) (bool, error) { - client, err := drivers.GetSSHClientFromDriver(d) - if err != nil { - return false, err - } - output, err := client.Output("sudo systemctl is-active docker") - if err != nil { - return false, err - } - // systemd returns error code on inactive - s := strings.TrimSpace(output) - return err == nil && s == "active", nil +func isDockerActive(r command.Runner) bool { + return sysinit.New(r).Active("docker") } // dockerEnvCmd represents the docker-env command @@ -137,88 +125,62 @@ var dockerEnvCmd = &cobra.Command{ Short: "Sets up docker env variables; similar to '$(docker-machine env)'", Long: `Sets up docker env variables; similar to '$(docker-machine env)'.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) + cname := ClusterFlagValue() + co := mustload.Running(cname) + driverName := co.CP.Host.DriverName + + if driverName == driver.None { + exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) } - defer api.Close() - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) + if co.Config.KubernetesConfig.ContainerRuntime != "docker" { + exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`, + out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime}) } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + + if ok := isDockerActive(co.CP.Runner); !ok { + exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname}) + } + + sh := shell.EnvConfig{ + Shell: shell.ForceShell, + } + + var err error + port := constants.DockerDaemonPort + if driver.NeedsPortForward(driverName) { + port, err = oci.ForwardedPort(driverName, cname, port) if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { - exit.UsageT(`'none' driver does not support 'minikube docker-env' command`) + exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err}) } + } - hostSt, err := machine.Status(api, machineName) + ec := DockerEnvConfig{ + EnvConfig: sh, + profile: cname, + driver: driverName, + hostIP: co.CP.IP.String(), + port: port, + certsDir: localpath.MakeMiniPath("certs"), + noProxy: noProxy, + } + + if ec.Shell == "" { + ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error getting host status", err) - } - if hostSt != state.Running.String() { - exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) - } - ok, err := isDockerActive(host.Driver) - if err != nil { - exit.WithError("Error getting service status", err) + exit.WithError("Error detecting shell", err) } + } - if !ok { - exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile}) + if dockerUnset { + if err := dockerUnsetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating unset output", err) } + return + } - hostIP, err := host.Driver.GetIP() - if err != nil { - exit.WithError("Error getting host IP", err) - } - - sh := shell.EnvConfig{ - Shell: shell.ForceShell, - } - - port := constants.DockerDaemonPort - 
if driver.IsKIC(host.DriverName) { // for kic we need to find what port docker/podman chose for us - hostIP = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(host.DriverName, profile, port) - if err != nil { - exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": host.DriverName, "error": err}) - } - } - - ec := DockerEnvConfig{ - EnvConfig: sh, - profile: profile, - driver: host.DriverName, - hostIP: hostIP, - port: port, - certsDir: localpath.MakeMiniPath("certs"), - noProxy: noProxy, - } - - if ec.Shell == "" { - ec.Shell, err = shell.Detect() - if err != nil { - exit.WithError("Error detecting shell", err) - } - } - - if dockerUnset { - if err := dockerUnsetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) - } - return - } - - if err := dockerSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) - } + if err := dockerSetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/generate-docs.go b/cmd/minikube/cmd/generate-docs.go index 3bd6aba070..e87b7270a4 100644 --- a/cmd/minikube/cmd/generate-docs.go +++ b/cmd/minikube/cmd/generate-docs.go @@ -20,7 +20,7 @@ import ( "os" "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" + "k8s.io/minikube/pkg/generate" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" ) @@ -43,7 +43,7 @@ var generateDocs = &cobra.Command{ } // generate docs - if err := doc.GenMarkdownTree(RootCmd, path); err != nil { + if err := generate.Docs(RootCmd, path); err != nil { exit.WithError("Unable to generate docs", err) } out.T(out.Documentation, "Docs have been saved at - {{.path}}", out.V{"path": path}) diff --git a/cmd/minikube/cmd/generate-docs_test.go b/cmd/minikube/cmd/generate-docs_test.go new file mode 100644 index 0000000000..3a4e8b6ba6 --- /dev/null +++ b/cmd/minikube/cmd/generate-docs_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/minikube/pkg/generate" +) + +func TestGenerateDocs(t *testing.T) { + dir := "../../../site/content/en/docs/commands/" + + for _, sc := range RootCmd.Commands() { + t.Run(sc.Name(), func(t *testing.T) { + if sc.Hidden { + t.Skip() + } + fp := filepath.Join(dir, fmt.Sprintf("%s.md", sc.Name())) + expectedContents, err := ioutil.ReadFile(fp) + if err != nil { + t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %v", err) + } + actualContents, err := generate.DocForCommand(sc) + if err != nil { + t.Fatalf("error getting contents: %v", err) + } + if diff := cmp.Diff(actualContents, string(expectedContents)); diff != "" { + t.Fatalf("Docs are not updated. 
Please run `make generate-docs` to update commands documentation: %s", diff) + } + }) + } +} diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 466323b4a4..6a2ca32055 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -17,14 +17,8 @@ limitations under the License. package cmd import ( - "github.com/docker/machine/libmachine/mcnerror" - "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,32 +28,7 @@ var ipCmd = &cobra.Command{ Short: "Retrieves the IP address of the running cluster", Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := api.Load(machineName) - if err != nil { - switch err := errors.Cause(err).(type) { - case mcnerror.ErrHostDoesNotExist: - exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name}) - default: - exit.WithError("Error getting host", err) - } - } - ip, err := host.Driver.GetIP() - if err != nil { - exit.WithError("Error getting IP", err) - } - out.Ln(ip) - } + co := mustload.Running(ClusterFlagValue()) + out.Ln(co.CP.IP.String()) }, } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index e24943a7d4..3eca6dfb06 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -24,10 +24,8 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -42,30 +40,15 @@ Examples: minikube kubectl -- --help minikube kubectl -- get pods --namespace kube-system`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - fmt.Fprintf(os.Stderr, "Error getting client: %v\n", err) - os.Exit(1) - } - defer api.Close() + co := mustload.Healthy(ClusterFlagValue()) - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil && !config.IsNotExist(err) { - out.ErrLn("Error loading profile config: %v", err) - } - - version := constants.DefaultKubernetesVersion - if cc != nil { - version = cc.KubernetesConfig.KubernetesVersion - } - - path, err := node.CacheKubectlBinary(version) + version := co.Config.KubernetesConfig.KubernetesVersion + c, err := KubectlCommand(version, args...) if err != nil { out.ErrLn("Error caching kubectl: %v", err) } glog.Infof("Running %s %v", path, args) - c := exec.Command(path, args...) 
c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr @@ -82,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`, } }, } + +// KubectlCommand will return kubectl command with a version matching the cluster +func KubectlCommand(version string, args ...string) (*exec.Cmd, error) { + if version == "" { + version = constants.DefaultKubernetesVersion + } + + path, err := node.CacheKubectlBinary(version) + if err != nil { + return nil, err + } + + return exec.Command(path, args...), nil +} diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index cf36b52cb2..109938dc34 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -17,22 +17,22 @@ limitations under the License. package cmd import ( + "os" + "github.com/spf13/cobra" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/logs" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/node" + "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/out" ) const ( // number of problems per log to output - numberOfProblems = 5 + numberOfProblems = 10 ) var ( @@ -51,64 +51,35 @@ var logsCmd = &cobra.Command{ Short: "Gets the logs of the running instance, used for debugging minikube, not user code.", Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`, Run: func(cmd *cobra.Command, args []string) { - cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } + co := mustload.Running(ClusterFlagValue()) - if nodeName == "" { - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - nodeName = cp.Name - } - - n, _, err := node.Retrieve(cfg, nodeName) - if err != nil { - exit.WithError("Error retrieving node", err) - } - - machineName := driver.MachineName(*cfg, *n) - - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - - h, err := api.Load(machineName) - if err != nil { - exit.WithError("api load", err) - } - runner, err := machine.CommandRunner(h) - if err != nil { - exit.WithError("command runner", err) - } - bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, *n) + bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CP.Node) if err != nil { exit.WithError("Error getting cluster bootstrapper", err) } - cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: runner}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CP.Runner}) if err != nil { exit.WithError("Unable to get runtime", err) } if followLogs { - err := logs.Follow(cr, bs, runner) + err := logs.Follow(cr, bs, *co.Config, co.CP.Runner) if err != nil { exit.WithError("Follow", err) } return } if showProblems { - problems := logs.FindProblems(cr, bs, runner) + problems := logs.FindProblems(cr, bs, *co.Config, co.CP.Runner) logs.OutputProblems(problems, numberOfProblems) return } - err = logs.Output(cr, bs, runner, numberOfLines) + err = logs.Output(cr, bs, *co.Config, co.CP.Runner, numberOfLines) if err != nil { - exit.WithError("Error getting machine logs", 
err) + out.Ln("") + // Avoid exit.WithError, since it outputs the issue URL + out.WarningT("{{.error}}", out.V{"error": err}) + os.Exit(exit.Unavailable) } }, } diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 46ea74b524..fa0db9f75d 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -30,12 +30,10 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/third_party/go9p/ufs" ) @@ -99,30 +97,16 @@ var mountCmd = &cobra.Command{ if glog.V(1) { debugVal = 1 // ufs.StartServer takes int debug param } - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary cp", err) - } - host, err := api.Load(driver.MachineName(*cc, cp)) - if err != nil { - exit.WithError("Error loading api", err) - } - if host.Driver.DriverName() == driver.None { + co := mustload.Running(ClusterFlagValue()) + if co.CP.Host.Driver.DriverName() == driver.None { exit.UsageT(`'none' driver does not support 'minikube mount' command`) } + var ip net.IP + var err error if mountIP == "" { - ip, err = cluster.GetVMHostIP(host) + ip, err = cluster.GetVMHostIP(co.CP.Host) if err != nil { exit.WithError("Error getting the host IP address to use from within the VM", err) } @@ -159,11 +143,11 @@ var mountCmd = &cobra.Command{ // An escape valve to allow future hackers to try NFS, VirtFS, or other FS types. if !supportedFilesystems[cfg.Type] { - out.T(out.Warning, "{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type}) + out.WarningT("{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type}) } bindIP := ip.String() // the ip to listen on the user's host machine - if driver.IsKIC(host.Driver.DriverName()) && runtime.GOOS != "linux" { + if driver.IsKIC(co.CP.Host.Driver.DriverName()) && runtime.GOOS != "linux" { bindIP = "127.0.0.1" } out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath}) @@ -187,27 +171,21 @@ var mountCmd = &cobra.Command{ }() } - // Use CommandRunner, as the native docker ssh service dies when Ctrl-C is received. - runner, err := machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - // Unmount if Ctrl-C or kill request is received. 
c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { for sig := range c { out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath}) - err := cluster.Unmount(runner, vmPath) + err := cluster.Unmount(co.CP.Runner, vmPath) if err != nil { - out.ErrT(out.FailureType, "Failed unmount: {{.error}}", out.V{"error": err}) + out.FailureT("Failed unmount: {{.error}}", out.V{"error": err}) } exit.WithCodeT(exit.Interrupted, "Received {{.name}} signal", out.V{"name": sig}) } }() - err = cluster.Mount(runner, ip.String(), vmPath, cfg) + err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg) if err != nil { exit.WithError("mount failed", err) } diff --git a/cmd/minikube/cmd/node.go b/cmd/minikube/cmd/node.go index 7b70780f74..39dbac6c7c 100644 --- a/cmd/minikube/cmd/node.go +++ b/cmd/minikube/cmd/node.go @@ -23,10 +23,9 @@ import ( // nodeCmd represents the set of node subcommands var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Node operations", - Long: "Operations on nodes", - Hidden: true, // This won't be fully functional and thus should not be documented yet + Use: "node", + Short: "Node operations", + Long: "Operations on nodes", Run: func(cmd *cobra.Command, args []string) { exit.UsageT("Usage: minikube node [add|start|stop|delete]") }, } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 9ee9e39f1e..3344065794 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,13 +17,11 @@ limitations under the License. package cmd import ( - "fmt" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -37,39 +35,41 @@ var nodeAddCmd = &cobra.Command{ Short: "Adds a node to the given cluster.", Long: "Adds a node to the given cluster config, and starts it.", Run: func(cmd *cobra.Command, args []string) { - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) + co := mustload.Healthy(ClusterFlagValue()) + cc := co.Config + + if driver.BareMetal(cc.Driver) { + out.FailureT("none driver does not support multi-node clusters") } - //name := profile + strconv.Itoa(len(mc.Nodes)+1) - name := fmt.Sprintf("m%d", len(cc.Nodes)+1) + name := node.Name(len(cc.Nodes) + 1) - out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) + out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) - n, err := node.Add(cc, name, cp, worker, "", profile) - if err != nil { - exit.WithError("Error adding node to cluster", err) + // TODO: Deal with parameters better. Ideally we should be able to accept any node-specific minikube start params here.
+ n := config.Node{ + Name: name, + Worker: worker, + ControlPlane: cp, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } - _, err = node.Start(*cc, *n, false, nil) - if err != nil { - exit.WithError("Error starting node", err) + if err := node.Add(cc, n); err != nil { + _, err := maybeDeleteAndRetry(*cc, n, nil, err) + if err != nil { + exit.WithError("failed to add node", err) + } } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile}) + out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) }, } func init() { + // TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.") nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.") - //We should figure out which of these flags to actually import - startCmd.Flags().Visit( - func(f *pflag.Flag) { - nodeAddCmd.Flags().AddFlag(f) - }, - ) + nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") + nodeCmd.AddCommand(nodeAddCmd) } diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go index f35c2d2c56..bca5c497c4 100644 --- a/cmd/minikube/cmd/node_delete.go +++ b/cmd/minikube/cmd/node_delete.go @@ -18,9 +18,8 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -36,17 +35,11 @@ var nodeDeleteCmd = &cobra.Command{ } name := args[0] - profile := viper.GetString(config.ProfileName) - out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": profile}) + co := mustload.Healthy(ClusterFlagValue()) + out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name}) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("loading config", err) - } - - err = node.Delete(*cc, name) - if err != nil { - out.FatalT("Failed to delete node {{.name}}", out.V{"name": name}) + if err := node.Delete(*co.Config, name); err != nil { + exit.WithError("deleting node", err) } out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name}) diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index c0090b6287..81f9ac6b80 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -20,10 +20,9 @@ import ( "os" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -37,38 +36,46 @@ var nodeStartCmd = &cobra.Command{ exit.UsageT("Usage: minikube node start [name]") } + api, cc := mustload.Partial(ClusterFlagValue()) name := args[0] - // Make sure it's not running - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("creating api client", err) - } - if machine.IsRunning(api, name) { out.T(out.Check, "{{.name}} is already running", out.V{"name": name}) 
os.Exit(0) } - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("loading config", err) - } - n, _, err := node.Retrieve(cc, name) if err != nil { exit.WithError("retrieving node", err) } - // Start it up baby - _, err = node.Start(*cc, *n, false, nil) + r, p, m, h, err := node.Provision(cc, n, false) if err != nil { - out.FatalT("Failed to start node {{.name}}", out.V{"name": name}) + exit.WithError("provisioning host for node", err) + } + + s := node.Starter{ + Runner: r, + PreExists: p, + MachineAPI: m, + Host: h, + Cfg: cc, + Node: n, + ExistingAddons: nil, + } + + _, err = node.Start(s, false) + if err != nil { + _, err := maybeDeleteAndRetry(*cc, *n, nil, err) + if err != nil { + exit.WithError("failed to start node", err) + } } }, } func init() { nodeStartCmd.Flags().String("name", "", "The name of the node to start") + nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") nodeCmd.AddCommand(nodeStartCmd) } diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index e2a37573b8..5dbceba1bc 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -18,11 +18,10 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -37,16 +36,7 @@ var nodeStopCmd = &cobra.Command{ } name := args[0] - - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("creating api client", err) - } - - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("getting config", err) - } + api, cc := mustload.Partial(ClusterFlagValue()) n, _, err := node.Retrieve(cc, name) if err != nil { diff --git a/cmd/minikube/cmd/options.go b/cmd/minikube/cmd/options.go index cc07420d6c..80f97606d1 100644 --- a/cmd/minikube/cmd/options.go +++ b/cmd/minikube/cmd/options.go @@ -37,10 +37,9 @@ var optionsCmd = &cobra.Command{ // runOptions handles the executes the flow of "minikube options" func runOptions(cmd *cobra.Command, args []string) { out.String("The following options can be passed to any command:\n\n") - for _, flagName := range viperWhiteList { - f := pflag.Lookup(flagName) - out.String(flagUsage(f)) - } + cmd.Root().PersistentFlags().VisitAll(func(flag *pflag.Flag) { + out.String(flagUsage(flag)) + }) } func flagUsage(flag *pflag.Flag) string { diff --git a/cmd/minikube/cmd/pause.go b/cmd/minikube/cmd/pause.go index 4b63ed963c..d7b090ce83 100644 --- a/cmd/minikube/cmd/pause.go +++ b/cmd/minikube/cmd/pause.go @@ -17,7 +17,6 @@ limitations under the License. 
package cmd import ( - "os" "strings" "github.com/golang/glog" @@ -25,11 +24,12 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -46,27 +46,10 @@ var pauseCmd = &cobra.Command{ } func runPause(cmd *cobra.Command, args []string) { - cname := viper.GetString(config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(cname) + co := mustload.Running(ClusterFlagValue()) - if err != nil && !config.IsNotExist(err) { - exit.WithError("Error loading profile config", err) - } - - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname}) - os.Exit(1) - } - - glog.Infof("config: %+v", cc) - - for _, n := range cc.Nodes { - host, err := machine.LoadHost(api, driver.MachineName(*cc, n)) + for _, n := range co.Config.Nodes { + host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n)) if err != nil { exit.WithError("Error getting host", err) } @@ -76,7 +59,7 @@ func runPause(cmd *cobra.Command, args []string) { exit.WithError("Failed to get command runner", err) } - cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { exit.WithError("Failed runtime", err) } @@ -102,6 +85,6 @@ func runPause(cmd *cobra.Command, args []string) { } func init() { - pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to pause") + pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause") pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces") } diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go index f68191f539..124a721de8 100644 --- a/cmd/minikube/cmd/podman-env.go +++ b/cmd/minikube/cmd/podman-env.go @@ -27,16 +27,13 @@ import ( "strings" "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/ssh" - "github.com/docker/machine/libmachine/state" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/shell" ) @@ -67,15 +64,16 @@ func podmanShellCfgSet(ec PodmanEnvConfig, envMap map[string]string) *PodmanShel } // isPodmanAvailable checks if Podman is available -func isPodmanAvailable(host *host.Host) (bool, error) { - // we need both "varlink bridge" and "podman varlink" - if _, err := host.RunSSHCommand("which varlink"); err != nil { - return false, err +func isPodmanAvailable(r command.Runner) bool { + if _, err := r.RunCmd(exec.Command("which", "varlink")); err != nil { + return false } - if _, err := host.RunSSHCommand("which podman"); err != nil { - return false, err + + 
if _, err := r.RunCmd(exec.Command("which", "podman")); err != nil { + return false } - return true, nil + + return true } func createExternalSSHClient(d drivers.Driver) (*ssh.ExternalClient, error) { @@ -108,75 +106,49 @@ var podmanEnvCmd = &cobra.Command{ Short: "Sets up podman env variables; similar to '$(podman-machine env)'", Long: `Sets up podman env variables; similar to '$(podman-machine env)'.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) + cname := ClusterFlagValue() + co := mustload.Running(cname) + driverName := co.CP.Host.DriverName + + if driverName == driver.None { + exit.UsageT(`'none' driver does not support 'minikube podman-env' command`) } - defer api.Close() - profile := viper.GetString(config.ProfileName) - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting config", err) + if ok := isPodmanAvailable(co.CP.Runner); !ok { + exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname}) } - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + + client, err := createExternalSSHClient(co.CP.Host.Driver) + if err != nil { + exit.WithError("Error getting ssh client", err) + } + + sh := shell.EnvConfig{ + Shell: shell.ForceShell, + } + ec := PodmanEnvConfig{ + EnvConfig: sh, + profile: cname, + driver: driverName, + client: client, + } + + if ec.Shell == "" { + ec.Shell, err = shell.Detect() if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { - exit.UsageT(`'none' driver does not support 'minikube podman-env' command`) + exit.WithError("Error detecting shell", err) } + } - hostSt, err := machine.Status(api, machineName) - if err != nil { - exit.WithError("Error getting host status", err) - } - if hostSt != state.Running.String() { - exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile}) - } - ok, err := isPodmanAvailable(host) - if err != nil { - exit.WithError("Error getting service status", err) + if podmanUnset { + if err := podmanUnsetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating unset output", err) } + return + } - if !ok { - exit.WithCodeT(exit.Unavailable, `The podman service within '{{.profile}}' is not active`, out.V{"profile": profile}) - } - - client, err := createExternalSSHClient(host.Driver) - if err != nil { - exit.WithError("Error getting ssh client", err) - } - - sh := shell.EnvConfig{ - Shell: shell.ForceShell, - } - ec := PodmanEnvConfig{ - EnvConfig: sh, - profile: profile, - driver: host.DriverName, - client: client, - } - - if ec.Shell == "" { - ec.Shell, err = shell.Detect() - if err != nil { - exit.WithError("Error detecting shell", err) - } - } - - if podmanUnset { - if err := podmanUnsetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating unset output", err) - } - return - } - - if err := podmanSetScript(ec, os.Stdout); err != nil { - exit.WithError("Error generating set output", err) - } + if err := podmanSetScript(ec, os.Stdout); err != nil { + exit.WithError("Error generating set output", err) } }, } diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index 7801c5529d..039afefc28 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "errors" "fmt" "net/url" "os" @@ -29,17 +30,13 @@ import ( "time" "github.com/golang/glog" - "github.com/pkg/browser" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/browser" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/minikube/tunnel/kic" @@ -77,33 +74,22 @@ var serviceCmd = &cobra.Command{ } svc := args[0] - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cfg, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - exit.WithError("Error getting control plane", err) - } - machineName := driver.MachineName(*cfg, cp) - if !machine.IsRunning(api, machineName) { - os.Exit(1) - } + cname := ClusterFlagValue() + co := mustload.Healthy(cname) - if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker { - startKicServiceTunnel(svc, cfg.Name) + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + startKicServiceTunnel(svc, cname) return } - urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) + urls, err := service.WaitForService(co.API, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval) if err != nil { + var s *service.SVCNotFoundError + if errors.As(err, &s) { + exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace. +You may select another namespace by using 'minikube service {{.service}} -n <namespace>'.
Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace}) + } exit.WithError("Error opening service", err) } @@ -151,7 +137,7 @@ func startKicServiceTunnel(svc, configName string) { service.PrintServiceList(os.Stdout, data) openURLs(svc, urls) - out.T(out.Warning, "Because you are using docker driver on Mac, the terminal needs to be open to run it.") + out.WarningT("Because you are using docker driver on Mac, the terminal needs to be open to run it.") <-ctrlC diff --git a/cmd/minikube/cmd/service_list.go b/cmd/minikube/cmd/service_list.go index f22aa71d23..7b17ff3c1e 100644 --- a/cmd/minikube/cmd/service_list.go +++ b/cmd/minikube/cmd/service_list.go @@ -22,14 +22,10 @@ import ( "strings" "github.com/spf13/cobra" - "github.com/spf13/viper" core "k8s.io/api/core/v1" "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/service" ) @@ -42,24 +38,9 @@ var serviceListCmd = &cobra.Command{ Short: "Lists the URLs for the services in your local cluster", Long: `Lists the URLs for the services in your local cluster`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - profileName := viper.GetString(pkg_config.ProfileName) - cfg, err := config.Load(profileName) - if err != nil { - exit.WithError("Error getting config", err) - } - cp, err := config.PrimaryControlPlane(cfg) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - if !machine.IsRunning(api, driver.MachineName(*cfg, cp)) { - exit.WithCodeT(exit.Unavailable, "profile {{.name}} is not running.", out.V{"name": profileName}) - } - serviceURLs, err := service.GetServiceURLs(api, serviceListNamespace, serviceURLTemplate) + co := mustload.Healthy(ClusterFlagValue()) + + serviceURLs, err := service.GetServiceURLs(co.API, serviceListNamespace, serviceURLTemplate) if err != nil { out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err}) out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.") @@ -71,14 +52,15 @@ var serviceListCmd = &cobra.Command{ if len(serviceURL.URLs) == 0 { data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "No node port"}) } else { + servicePortNames := strings.Join(serviceURL.PortNames, "\n") serviceURLs := strings.Join(serviceURL.URLs, "\n") // if we are running Docker on OSX we empty the internal service URLs - if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker { + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { serviceURLs = "" } - data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "", serviceURLs}) + data = append(data, []string{serviceURL.Namespace, serviceURL.Name, servicePortNames, serviceURLs}) } } diff --git a/cmd/minikube/cmd/ssh-key.go b/cmd/minikube/cmd/ssh-key.go index 61d2c441a8..b7a0ddf0de 100644 --- a/cmd/minikube/cmd/ssh-key.go +++ b/cmd/minikube/cmd/ssh-key.go @@ -20,10 +20,8 @@ import ( "path/filepath" "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" + 
"k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -33,10 +31,7 @@ var sshKeyCmd = &cobra.Command{ Short: "Retrieve the ssh identity key path of the specified cluster", Long: "Retrieve the ssh identity key path of the specified cluster.", Run: func(cmd *cobra.Command, args []string) { - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Getting machine config failed", err) - } + _, cc := mustload.Partial(ClusterFlagValue()) out.Ln(filepath.Join(localpath.MiniPath(), "machines", cc.Name, "id_rsa")) }, } diff --git a/cmd/minikube/cmd/ssh.go b/cmd/minikube/cmd/ssh.go index 8c78c87ee2..bbd39af369 100644 --- a/cmd/minikube/cmd/ssh.go +++ b/cmd/minikube/cmd/ssh.go @@ -21,12 +21,13 @@ import ( "github.com/docker/machine/libmachine/ssh" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" ) @@ -40,34 +41,30 @@ var sshCmd = &cobra.Command{ Short: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'", Long: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.", Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - // TODO: allow choice of node to ssh into - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("Error getting primary control plane", err) - } - host, err := machine.LoadHost(api, driver.MachineName(*cc, cp)) - if err != nil { - exit.WithError("Error getting host", err) - } - if host.Driver.DriverName() == driver.None { + cname := ClusterFlagValue() + co := mustload.Running(cname) + if co.CP.Host.DriverName == driver.None { exit.UsageT("'none' driver does not support 'minikube ssh' command") } + + var err error + var n *config.Node + if nodeName == "" { + n = co.CP.Node + } else { + n, _, err = node.Retrieve(co.Config, nodeName) + if err != nil { + exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName}) + } + } + if nativeSSHClient { ssh.SetDefaultClient(ssh.Native) } else { ssh.SetDefaultClient(ssh.External) } - err = machine.CreateSSHShell(api, *cc, cp, args) + err = machine.CreateSSHShell(co.API, *co.Config, *n, args) if err != nil { // This is typically due to a non-zero exit code, so no need for flourish. out.ErrLn("ssh: %v", err) @@ -78,5 +75,6 @@ var sshCmd = &cobra.Command{ } func init() { - sshCmd.Flags().BoolVar(&nativeSSHClient, nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. 
Defaults to the primary control plane.") } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index ec0d58cbf5..7855e91085 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -27,7 +27,6 @@ import ( "os/user" "runtime" "strings" - "time" "github.com/blang/semver" "github.com/docker/machine/libmachine/ssh" @@ -47,81 +46,24 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/notify" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/translate" + "k8s.io/minikube/pkg/util" pkgutil "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/version" ) -const ( - isoURL = "iso-url" - memory = "memory" - cpus = "cpus" - humanReadableDiskSize = "disk-size" - nfsSharesRoot = "nfs-shares-root" - nfsShare = "nfs-share" - kubernetesVersion = "kubernetes-version" - hostOnlyCIDR = "host-only-cidr" - containerRuntime = "container-runtime" - criSocket = "cri-socket" - networkPlugin = "network-plugin" - enableDefaultCNI = "enable-default-cni" - hypervVirtualSwitch = "hyperv-virtual-switch" - hypervUseExternalSwitch = "hyperv-use-external-switch" - hypervExternalAdapter = "hyperv-external-adapter" - kvmNetwork = "kvm-network" - kvmQemuURI = "kvm-qemu-uri" - kvmGPU = "kvm-gpu" - kvmHidden = "kvm-hidden" - minikubeEnvPrefix = "MINIKUBE" - installAddons = "install-addons" - defaultDiskSize = "20000mb" - keepContext = "keep-context" - createMount = "mount" - featureGates = "feature-gates" - apiServerName = "apiserver-name" - apiServerPort = "apiserver-port" - dnsDomain = "dns-domain" - serviceCIDR = "service-cluster-ip-range" - imageRepository = "image-repository" - imageMirrorCountry = "image-mirror-country" - mountString = "mount-string" - disableDriverMounts = "disable-driver-mounts" - cacheImages = "cache-images" - uuid = "uuid" - vpnkitSock = "hyperkit-vpnkit-sock" - vsockPorts = "hyperkit-vsock-ports" - embedCerts = "embed-certs" - noVTXCheck = "no-vtx-check" - downloadOnly = "download-only" - dnsProxy = "dns-proxy" - hostDNSResolver = "host-dns-resolver" - waitUntilHealthy = "wait" - force = "force" - dryRun = "dry-run" - interactive = "interactive" - waitTimeout = "wait-timeout" - nativeSSH = "native-ssh" - minUsableMem = 1024 // Kubernetes will not start with less than 1GB - minRecommendedMem = 2000 // Warn at no lower than existing configurations - minimumCPUS = 2 - minimumDiskSize = 2000 - autoUpdate = "auto-update-drivers" - hostOnlyNicType = "host-only-nic-type" - natNicType = "nat-nic-type" -) - var ( registryMirror []string insecureRegistry []string @@ -139,100 +81,6 @@ func init() { } } -// initMinikubeFlags includes commandline flags for minikube. -func initMinikubeFlags() { - viper.SetEnvPrefix(minikubeEnvPrefix) - // Replaces '-' in flags with '_' in env variables - // e.g. 
iso-url => $ENVPREFIX_ISO_URL - viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - viper.AutomaticEnv() - - startCmd.Flags().Bool(force, false, "Force minikube to perform possibly dangerous operations") - startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information") - startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state") - - startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.") - startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g).") - startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: [], where unit = b, k, m or g).") - startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.") - startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.") - startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.") - startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.") - startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.") - startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).") - startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.") - startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") - startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") - startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") - startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") - startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".") - startCmd.Flags().Bool(waitUntilHealthy, true, "Block until the apiserver is servicing API requests") - startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.") - startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") - startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") - startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") -} - -// initKubernetesFlags inits the commandline flags for kubernetes related options -func initKubernetesFlags() { - startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)") - startCmd.Flags().Var(&node.ExtraOptions, "extra-config", - `A set of key=value pairs that describe configuration that may be passed to different components. - The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. 
- Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, kube-proxy, scheduler - Valid kubeadm parameters: `+fmt.Sprintf("%s, %s", strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], ", "), strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], ","))) - startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.") - startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster") - startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port") - startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine") - startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine") - startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine") -} - -// initDriverFlags inits the commandline flags for vm drivers -func initDriverFlags() { - startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers())) - startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.") - startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors") - - // kvm2 - startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)") - startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (kvm2 driver only)") - startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube") - startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)") - - // virtualbox - startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (virtualbox driver only)") - startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox driver only)") - startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)") - startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)") - startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") - startCmd.Flags().String(natNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") - - // hyperkit - startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)") - startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (hyperkit driver only)") - startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. 
If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)") - startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (hyperkit driver only)") - startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)") - - // hyperv - startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)") - startCmd.Flags().Bool(hypervUseExternalSwitch, false, "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)") - startCmd.Flags().String(hypervExternalAdapter, "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)") -} - -// initNetworkingFlags inits the commandline flags for connectivity related flags for start -func initNetworkingFlags() { - startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.") - startCmd.Flags().StringSliceVar(®istryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon") - startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers") - startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.") - startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.") - startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") - startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. 
(format: key=value)") -} - // startCmd represents the start command var startCmd = &cobra.Command{ Use: "start", @@ -276,6 +124,13 @@ func platform() string { // runStart handles the executes the flow of "minikube start" func runStart(cmd *cobra.Command, args []string) { displayVersion(version.GetVersion()) + + // No need to do the update check if no one is going to see it + if !viper.GetBool(interactive) || !viper.GetBool(dryRun) { + // Avoid blocking execution on optional HTTP fetches + go notify.MaybePrintUpdateTextFromGithub() + } + displayEnviron(os.Environ()) // if --registry-mirror specified when run minikube start, @@ -289,16 +144,65 @@ func runStart(cmd *cobra.Command, args []string) { registryMirror = viper.GetStringSlice("registry_mirror") } - existing, err := config.Load(viper.GetString(config.ProfileName)) + existing, err := config.Load(ClusterFlagValue()) if err != nil && !config.IsNotExist(err) { exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err}) } - ds := selectDriver(existing) + validateSpecifiedDriver(existing) + ds, alts, specified := selectDriver(existing) + starter, err := provisionWithDriver(cmd, ds, existing) + if err != nil { + if specified { + // If the user specified a driver, don't fallback to anything else + exit.WithError("error provisioning host", err) + } else { + success := false + // Walk down the rest of the options + for _, alt := range alts { + out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err}) + ds = alt + // Delete the existing cluster and try again with the next driver on the list + profile, err := config.LoadProfile(ClusterFlagValue()) + if err != nil { + glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue()) + } + + err = deleteProfile(profile) + if err != nil { + out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()}) + } + starter, err = provisionWithDriver(cmd, ds, existing) + if err != nil { + continue + } else { + // Success! 
+ success = true + break + } + } + if !success { + exit.WithError("error provisioning host", err) + } + } + } + + kubeconfig, err := startWithDriver(starter, existing) + if err != nil { + exit.WithError("failed to start node", err) + } + + if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil { + glog.Errorf("kubectl info: %v", err) + } + +} + +func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) { driverName := ds.Name glog.Infof("selected driver: %s", driverName) validateDriver(ds, existing) - err = autoSetDriverOptions(cmd, driverName) + err := autoSetDriverOptions(cmd, driverName) if err != nil { glog.Errorf("Error autoSetOptions : %v", err) } @@ -312,23 +216,23 @@ func runStart(cmd *cobra.Command, args []string) { } k8sVersion := getKubernetesVersion(existing) - mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName) + cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName) if err != nil { - exit.WithError("Failed to generate config", err) + return node.Starter{}, errors.Wrap(err, "Failed to generate config") } // This is about as far as we can go without overwriting config files if viper.GetBool(dryRun) { out.T(out.DryRun, `dry-run validation complete!`) - return + os.Exit(0) } - if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) { + if driver.IsVM(driverName) { url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) if err != nil { - exit.WithError("Failed to cache ISO", err) + return node.Starter{}, errors.Wrap(err, "Failed to cache ISO") } - mc.MinikubeISO = url + cc.MinikubeISO = url } if viper.GetBool(nativeSSH) { @@ -337,14 +241,65 @@ func runStart(cmd *cobra.Command, args []string) { ssh.SetDefaultClient(ssh.External) } - kubeconfig, err := startNode(existing, mc, n) - if err != nil { - exit.WithError("Starting node", err) + var existingAddons map[string]bool + if viper.GetBool(installAddons) { + existingAddons = map[string]bool{} + if existing != nil && existing.Addons != nil { + existingAddons = existing.Addons + } } - if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil { - glog.Errorf("kubectl info: %v", err) + mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true) + if err != nil { + return node.Starter{}, err } + + return node.Starter{ + Runner: mRunner, + PreExists: preExists, + MachineAPI: mAPI, + Host: host, + ExistingAddons: existingAddons, + Cfg: &cc, + Node: &n, + }, nil +} + +func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { + kubeconfig, err := node.Start(starter, true) + if err != nil { + kubeconfig, err = maybeDeleteAndRetry(*starter.Cfg, *starter.Node, starter.ExistingAddons, err) + if err != nil { + return nil, err + } + } + + numNodes := viper.GetInt(nodes) + if numNodes == 1 && existing != nil { + numNodes = len(existing.Nodes) + } + if numNodes > 1 { + if driver.BareMetal(starter.Cfg.Driver) { + exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") + } else { + for i := 1; i < numNodes; i++ { + nodeName := node.Name(i + 1) + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + } + out.Ln("") // extra newline for clarity on the command line + err := node.Add(starter.Cfg, n) + if err != nil { + return nil, errors.Wrap(err, "adding node") + } + } + } + } + + return 
kubeconfig, nil } func updateDriver(driverName string) { @@ -358,16 +313,11 @@ func updateDriver(driverName string) { func displayVersion(version string) { prefix := "" - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { - prefix = fmt.Sprintf("[%s] ", viper.GetString(config.ProfileName)) + if ClusterFlagValue() != constants.DefaultClusterName { + prefix = fmt.Sprintf("[%s] ", ClusterFlagValue()) } - versionState := out.Happy - if notify.MaybePrintUpdateTextFromGithub() { - versionState = out.Meh - } - - out.T(versionState, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) + out.T(out.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) } // displayEnviron makes the user aware of environment variables that will affect how minikube operates @@ -382,17 +332,6 @@ func displayEnviron(env []string) { } } -func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) { - var existingAddons map[string]bool - if viper.GetBool(installAddons) { - existingAddons = map[string]bool{} - if existing != nil && existing.Addons != nil { - existingAddons = existing.Addons - } - } - return node.Start(mc, n, true, existingAddons) -} - func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error { if kcs.KeepContext { out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName}) @@ -402,26 +341,16 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st path, err := exec.LookPath("kubectl") if err != nil { - out.T(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/") + out.ErrT(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/") return nil } - j, err := exec.Command(path, "version", "--client", "--output=json").Output() + gitVersion, err := kubectlVersion(path) if err != nil { - return errors.Wrap(err, "exec") + return err } - cv := struct { - ClientVersion struct { - GitVersion string `json:"gitVersion"` - } `json:"clientVersion"` - }{} - err = json.Unmarshal(j, &cv) - if err != nil { - return errors.Wrap(err, "unmarshal") - } - - client, err := semver.Make(strings.TrimPrefix(cv.ClientVersion.GitVersion, version.VersionPrefix)) + client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix)) if err != nil { return errors.Wrap(err, "client semver") } @@ -432,42 +361,136 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st if client.Major != cluster.Major || minorSkew > 1 { out.Ln("") - out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.", + out.WarningT("{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.", out.V{"path": path, "client_version": client, "cluster_version": cluster}) - out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version", + out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version", out.V{"path": path, "client_version": client}) } return nil } -func selectDriver(existing *config.ClusterConfig) registry.DriverState { +func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr 
error) (*kubeconfig.Settings, error) { + if viper.GetBool(deleteOnFailure) { + out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name}) + // Start failed, delete the cluster and try again + profile, err := config.LoadProfile(cc.Name) + if err != nil { + out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name}) + } + + err = deleteProfile(profile) + if err != nil { + out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name}) + } + + var kubeconfig *kubeconfig.Settings + for _, n := range cc.Nodes { + r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane) + s := node.Starter{ + Runner: r, + PreExists: p, + MachineAPI: m, + Host: h, + Cfg: &cc, + Node: &n, + ExistingAddons: existingAddons, + } + if err != nil { + // Ok we failed again, let's bail + return nil, err + } + + k, err := node.Start(s, n.ControlPlane) + if n.ControlPlane { + kubeconfig = k + } + if err != nil { + // Ok we failed again, let's bail + return nil, err + } + } + return kubeconfig, nil + } + // Don't delete the cluster unless they ask + return nil, errors.Wrap(originalErr, "startup failed") +} + +func kubectlVersion(path string) (string, error) { + j, err := exec.Command(path, "version", "--client", "--output=json").Output() + if err != nil { + // really old kubernetes clients did not have the --output parameter + b, err := exec.Command(path, "version", "--client", "--short").Output() + if err != nil { + return "", errors.Wrap(err, "exec") + } + s := strings.TrimSpace(string(b)) + return strings.Replace(s, "Client Version: ", "", 1), nil + } + + cv := struct { + ClientVersion struct { + GitVersion string `json:"gitVersion"` + } `json:"clientVersion"` + }{} + err = json.Unmarshal(j, &cv) + if err != nil { + return "", errors.Wrap(err, "unmarshal") + } + + return cv.ClientVersion.GitVersion, nil +} + +func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) { // Technically unrelated, but important to perform before detection driver.SetLibvirtURI(viper.GetString(kvmQemuURI)) // By default, the driver is whatever we used last time - if existing != nil && existing.Driver != "" { - ds := driver.Status(existing.Driver) + if existing != nil { + old := hostDriver(existing) + ds := driver.Status(old) out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()}) - return ds + return ds, nil, true } // Default to looking at the new driver parameter - if viper.GetString("driver") != "" { - ds := driver.Status(viper.GetString("driver")) + if d := viper.GetString("driver"); d != "" { + if vmd := viper.GetString("vm-driver"); vmd != "" { + // Output a warning + warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set. + + Since vm-driver is deprecated, minikube will default to driver={{.driver}}. + + If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning. 
+ ` + out.WarningT(warning, out.V{"driver": d, "vmd": vmd}) + } + ds := driver.Status(d) + if ds.Name == "" { + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + } out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) - return ds + return ds, nil, true } // Fallback to old driver parameter - if viper.GetString("vm-driver") != "" { + if d := viper.GetString("vm-driver"); d != "" { ds := driver.Status(viper.GetString("vm-driver")) + if ds.Name == "" { + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS}) + } out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()}) - return ds + return ds, nil, true } - pick, alts := driver.Suggest(driver.Choices()) + choices := driver.Choices(viper.GetBool("vm")) + pick, alts, rejects := driver.Suggest(choices) if pick.Name == "" { - exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/") + out.T(out.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:") + for _, r := range rejects { + out.T(out.Option, "{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection}) + } + out.T(out.Workaround, "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/") + os.Exit(exit.Unavailable) } if len(alts) > 1 { @@ -479,7 +502,72 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { } else { out.T(out.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()}) } - return pick + return pick, alts, false +} + +// hostDriver returns the actual driver used by a libmachine host, which can differ from our config +func hostDriver(existing *config.ClusterConfig) string { + if existing == nil { + return "" + } + api, err := machine.NewAPIClient() + if err != nil { + glog.Warningf("selectDriver NewAPIClient: %v", err) + return existing.Driver + } + + cp, err := config.PrimaryControlPlane(existing) + if err != nil { + glog.Warningf("Unable to get control plane from existing config: %v", err) + return existing.Driver + } + machineName := driver.MachineName(*existing, cp) + h, err := api.Load(machineName) + if err != nil { + glog.Warningf("selectDriver api.Load: %v", err) + return existing.Driver + } + + return h.Driver.DriverName() +} + +// validateSpecifiedDriver makes sure that if a user has passed in a driver +// it matches the existing cluster if there is one +func validateSpecifiedDriver(existing *config.ClusterConfig) { + if existing == nil { + return + } + + var requested string + if d := viper.GetString("driver"); d != "" { + requested = d + } else if d := viper.GetString("vm-driver"); d != "" { + requested = d + } + + // Neither --vm-driver or --driver was specified + if requested == "" { + return + } + + old := hostDriver(existing) + if requested == old { + return + } + + out.ErrT(out.Conflict, `The existing "{{.name}}" VM was created using the "{{.old}}" driver, and is incompatible with the "{{.new}}" driver.`, + out.V{"name": existing.Name, "new": requested, "old": old}) + + out.ErrT(out.Workaround, `To proceed, either: + +1) Delete the existing "{{.name}}" cluster using: '{{.delcommand}}' + +* or * + +2) Start the existing "{{.name}}" cluster using: '{{.command}} --driver={{.old}}' +`, 
out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "delcommand": mustload.ExampleCmd(existing.Name, "delete"), "old": old, "name": existing.Name}) + + exit.WithCodeT(exit.Config, "Exiting.") } // validateDriver validates that the selected driver appears sane, exits if not @@ -487,7 +575,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { name := ds.Name glog.Infof("validating driver %q against %+v", name, existing) if !driver.Supported(name) { - exit.WithCodeT(exit.Unavailable, "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) + exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS}) } st := ds.State @@ -504,52 +592,18 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { out.ErrLn("") if !st.Installed && !viper.GetBool(force) { - if existing != nil && name == existing.Driver { - exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) + if existing != nil { + if old := hostDriver(existing); name == old { + exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name}) + } } exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name}) } + + if !viper.GetBool(force) { + exit.WithCodeT(exit.Unavailable, "Failed to validate '{{.driver}}' driver", out.V{"driver": name}) + } } - - if existing == nil { - return - } - - api, err := machine.NewAPIClient() - if err != nil { - glog.Warningf("selectDriver NewAPIClient: %v", err) - return - } - - cp, err := config.PrimaryControlPlane(existing) - if err != nil { - exit.WithError("Error getting primary cp", err) - } - - machineName := driver.MachineName(*existing, cp) - h, err := api.Load(machineName) - if err != nil { - glog.Warningf("selectDriver api.Load: %v", err) - return - } - - if h.Driver.DriverName() == name { - return - } - - out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM that was created using the "{{.old_driver}}" driver, and is incompatible with the "{{.driver}}" driver.`, - out.V{"profile_name": machineName, "driver": name, "old_driver": h.Driver.DriverName()}) - - out.ErrT(out.Workaround, `To proceed, either: - - 1) Delete the existing "{{.profile_name}}" cluster using: '{{.command}} delete' - - * or * - - 2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} start --driver={{.old_driver}}' - `, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName}) - - exit.WithCodeT(exit.Config, "Exiting.") } func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) { @@ -603,14 +657,6 @@ func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string return false, fallback, nil } -// Return a minikube command containing the current profile name -func minikubeCmd() string { - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { - return fmt.Sprintf("minikube -p %s", config.ProfileName) - } - return "minikube" -} - // validateUser validates minikube is run by the recommended user (privileged or regular) func validateUser(drvName string) { u, err := user.Current() @@ -622,23 +668,24 @@ func 
validateUser(drvName string) { useForce := viper.GetBool(force) if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce { - exit.WithCodeT(exit.Permissions, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.`, out.V{"driver_name": drvName}) + exit.WithCodeT(exit.Permissions, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName}) } if driver.NeedsRoot(drvName) || u.Uid != "0" { return } - out.T(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName}) - out.T(out.Tip, "If you are running minikube within a VM, consider using --driver=none:") - out.T(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.ErrT(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName}) + out.ErrT(out.Tip, "If you are running minikube within a VM, consider using --driver=none:") + out.ErrT(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/") if !useForce { os.Exit(exit.Permissions) } - _, err = config.Load(viper.GetString(config.ProfileName)) + cname := ClusterFlagValue() + _, err = config.Load(cname) if err == nil || !config.IsNotExist(err) { - out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()}) + out.ErrT(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")}) } if !useForce { exit.WithCodeT(exit.Permissions, "Exiting") @@ -666,6 +713,9 @@ func memoryLimits(drvName string) (int, int, error) { // suggestMemoryAllocation calculates the default memory footprint in MB func suggestMemoryAllocation(sysLimit int, containerLimit int) int { + if mem := viper.GetInt(memory); mem != 0 { + return mem + } fallback := 2200 maximum := 6000 @@ -758,7 +808,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { } if driver.BareMetal(drvName) { - if viper.GetString(config.ProfileName) != constants.DefaultClusterName { + if ClusterFlagValue() != constants.DefaultClusterName { exit.WithCodeT(exit.Config, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName}) } @@ -766,10 +816,18 @@ func validateFlags(cmd *cobra.Command, drvName string) { if runtime != "docker" { out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime}) } + + // conntrack is required starting with kubernetes 1.18, include the release candidates for completion + version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil)) + if version.GTE(semver.MustParse("1.18.0-beta.1")) { + if _, err := exec.LookPath("conntrack"); err != nil { + exit.WithCodeT(exit.Config, "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) + } + } } // check that kubeadm extra args contain only whitelisted parameters - for param := range node.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { + for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) { if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) && !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) { exit.UsageT("Sorry, the 
kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param}) @@ -797,145 +855,17 @@ func validateRegistryMirror() { } } -// generateCfgFromFlags generates config.Config based on flags and supplied arguments -func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) { - r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) - if err != nil { - return config.ClusterConfig{}, config.Node{}, err - } - - // Pick good default values for --network-plugin and --enable-default-cni based on runtime. - selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI) - selectedNetworkPlugin := viper.GetString(networkPlugin) - if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) { - selectedNetworkPlugin = "cni" - if !cmd.Flags().Changed(enableDefaultCNI) { - selectedEnableDefaultCNI = true - } - } - - // Feed Docker our host proxy environment by default, so that it can pull images - if _, ok := r.(*cruntime.Docker); ok && !cmd.Flags().Changed("docker-env") { - setDockerProxy() - } - - repository := viper.GetString(imageRepository) - mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry)) - if strings.ToLower(repository) == "auto" || mirrorCountry != "" { - found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(k8sVersion)) - if err != nil { - exit.WithError("Failed to check main repository and mirrors for images for images", err) - } - - if !found { - if autoSelectedRepository == "" { - exit.WithCodeT(exit.Failure, "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag") - } else { - out.WarningT("None of the known repositories in your location are accessible. 
Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository}) - } - } - - repository = autoSelectedRepository - } - - if cmd.Flags().Changed(imageRepository) { - out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository}) - } - - var kubeNodeName string - if drvName != driver.None { - kubeNodeName = "m01" - } - - return createNode(cmd, k8sVersion, kubeNodeName, drvName, - repository, selectedEnableDefaultCNI, selectedNetworkPlugin) -} - -func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repository string, - selectedEnableDefaultCNI bool, selectedNetworkPlugin string) (config.ClusterConfig, config.Node, error) { - - sysLimit, containerLimit, err := memoryLimits(drvName) - if err != nil { - glog.Warningf("Unable to query memory limits: %v", err) - } - - mem := suggestMemoryAllocation(sysLimit, containerLimit) - if cmd.Flags().Changed(memory) { - mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory)) - if err != nil { - exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) - } - - } else { - glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit) - } - +func createNode(cc config.ClusterConfig, kubeNodeName string) (config.ClusterConfig, config.Node, error) { // Create the initial node, which will necessarily be a control plane cp := config.Node{ - Port: viper.GetInt(apiServerPort), - KubernetesVersion: k8sVersion, + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: getKubernetesVersion(&cc), Name: kubeNodeName, ControlPlane: true, Worker: true, } - - diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)) - if err != nil { - exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) - } - - cfg := config.ClusterConfig{ - Name: viper.GetString(config.ProfileName), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - MinikubeISO: viper.GetString(isoURL), - Memory: mem, - CPUs: viper.GetInt(cpus), - DiskSize: diskSize, - Driver: drvName, - HyperkitVpnKitSock: viper.GetString(vpnkitSock), - HyperkitVSockPorts: viper.GetStringSlice(vsockPorts), - NFSShare: viper.GetStringSlice(nfsShare), - NFSSharesRoot: viper.GetString(nfsSharesRoot), - DockerEnv: node.DockerEnv, - DockerOpt: node.DockerOpt, - InsecureRegistry: insecureRegistry, - RegistryMirror: registryMirror, - HostOnlyCIDR: viper.GetString(hostOnlyCIDR), - HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch), - HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch), - HypervExternalAdapter: viper.GetString(hypervExternalAdapter), - KVMNetwork: viper.GetString(kvmNetwork), - KVMQemuURI: viper.GetString(kvmQemuURI), - KVMGPU: viper.GetBool(kvmGPU), - KVMHidden: viper.GetBool(kvmHidden), - DisableDriverMounts: viper.GetBool(disableDriverMounts), - UUID: viper.GetString(uuid), - NoVTXCheck: viper.GetBool(noVTXCheck), - DNSProxy: viper.GetBool(dnsProxy), - HostDNSResolver: viper.GetBool(hostDNSResolver), - HostOnlyNicType: viper.GetString(hostOnlyNicType), - NatNicType: viper.GetString(natNicType), - KubernetesConfig: config.KubernetesConfig{ - KubernetesVersion: k8sVersion, - ClusterName: viper.GetString(config.ProfileName), - APIServerName: viper.GetString(apiServerName), - APIServerNames: apiServerNames, - APIServerIPs: 
apiServerIPs, - DNSDomain: viper.GetString(dnsDomain), - FeatureGates: viper.GetString(featureGates), - ContainerRuntime: viper.GetString(containerRuntime), - CRISocket: viper.GetString(criSocket), - NetworkPlugin: selectedNetworkPlugin, - ServiceCIDR: viper.GetString(serviceCIDR), - ImageRepository: repository, - ExtraOptions: node.ExtraOptions, - ShouldLoadCachedImages: viper.GetBool(cacheImages), - EnableDefaultCNI: selectedEnableDefaultCNI, - }, - Nodes: []config.Node{cp}, - } - return cfg, cp, nil + cc.Nodes = []config.Node{cp} + return cc, cp, nil } // setDockerProxy sets the proxy environment variables in the docker environment. @@ -951,7 +881,7 @@ func setDockerProxy() { continue } } - node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v)) + config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v)) } } } @@ -960,10 +890,10 @@ func setDockerProxy() { func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { err = nil hints := driver.FlagDefaults(drvName) - if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 { + if len(hints.ExtraOptions) > 0 { for _, eo := range hints.ExtraOptions { glog.Infof("auto setting extra-config to %q.", eo) - err = node.ExtraOptions.Set(eo) + err = config.ExtraOptions.Set(eo) if err != nil { err = errors.Wrapf(err, "setting extra option %s", eo) } @@ -992,13 +922,15 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) { func getKubernetesVersion(old *config.ClusterConfig) string { paramVersion := viper.GetString(kubernetesVersion) - if paramVersion == "" { // if the user did not specify any version then ... - if old != nil { // .. use the old version from config (if any) - paramVersion = old.KubernetesConfig.KubernetesVersion - } - if paramVersion == "" { // .. otherwise use the default version - paramVersion = constants.DefaultKubernetesVersion - } + // try to load the old version first if the user didn't specify anything + if paramVersion == "" && old != nil { + paramVersion = old.KubernetesConfig.KubernetesVersion + } + + if paramVersion == "" || strings.EqualFold(paramVersion, "stable") { + paramVersion = constants.DefaultKubernetesVersion + } else if strings.EqualFold(paramVersion, "latest") { + paramVersion = constants.NewestKubernetesVersion } nvs, err := semver.Make(strings.TrimPrefix(paramVersion, version.VersionPrefix)) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go new file mode 100644 index 0000000000..5b768a5267 --- /dev/null +++ b/cmd/minikube/cmd/start_flags.go @@ -0,0 +1,587 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "fmt" + "strings" + "time" + + "github.com/blang/semver" + "github.com/golang/glog" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/out" + pkgutil "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/version" +) + +const ( + isoURL = "iso-url" + memory = "memory" + cpus = "cpus" + humanReadableDiskSize = "disk-size" + nfsSharesRoot = "nfs-shares-root" + nfsShare = "nfs-share" + kubernetesVersion = "kubernetes-version" + hostOnlyCIDR = "host-only-cidr" + containerRuntime = "container-runtime" + criSocket = "cri-socket" + networkPlugin = "network-plugin" + enableDefaultCNI = "enable-default-cni" + hypervVirtualSwitch = "hyperv-virtual-switch" + hypervUseExternalSwitch = "hyperv-use-external-switch" + hypervExternalAdapter = "hyperv-external-adapter" + kvmNetwork = "kvm-network" + kvmQemuURI = "kvm-qemu-uri" + kvmGPU = "kvm-gpu" + kvmHidden = "kvm-hidden" + minikubeEnvPrefix = "MINIKUBE" + installAddons = "install-addons" + defaultDiskSize = "20000mb" + keepContext = "keep-context" + createMount = "mount" + featureGates = "feature-gates" + apiServerName = "apiserver-name" + apiServerPort = "apiserver-port" + dnsDomain = "dns-domain" + serviceCIDR = "service-cluster-ip-range" + imageRepository = "image-repository" + imageMirrorCountry = "image-mirror-country" + mountString = "mount-string" + disableDriverMounts = "disable-driver-mounts" + cacheImages = "cache-images" + uuid = "uuid" + vpnkitSock = "hyperkit-vpnkit-sock" + vsockPorts = "hyperkit-vsock-ports" + embedCerts = "embed-certs" + noVTXCheck = "no-vtx-check" + downloadOnly = "download-only" + dnsProxy = "dns-proxy" + hostDNSResolver = "host-dns-resolver" + waitComponents = "wait" + force = "force" + dryRun = "dry-run" + interactive = "interactive" + waitTimeout = "wait-timeout" + nativeSSH = "native-ssh" + minUsableMem = 1024 // Kubernetes will not start with less than 1GB + minRecommendedMem = 2000 // Warn at no lower than existing configurations + minimumCPUS = 2 + minimumDiskSize = 2000 + autoUpdate = "auto-update-drivers" + hostOnlyNicType = "host-only-nic-type" + natNicType = "nat-nic-type" + nodes = "nodes" + preload = "preload" + deleteOnFailure = "delete-on-failure" +) + +// initMinikubeFlags includes commandline flags for minikube. +func initMinikubeFlags() { + viper.SetEnvPrefix(minikubeEnvPrefix) + // Replaces '-' in flags with '_' in env variables + // e.g. iso-url => $ENVPREFIX_ISO_URL + viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + viper.AutomaticEnv() + + startCmd.Flags().Bool(force, false, "Force minikube to perform possibly dangerous operations") + startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information") + startCmd.Flags().Bool(dryRun, false, "dry-run mode. 
Validates configuration, but does not mutate system state") + + startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.") + startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g).") + startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: [], where unit = b, k, m or g).") + startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.") + startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.") + startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.") + startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.") + startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.") + startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).") + startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.") + startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.") + startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") + startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") + startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.") + startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".") + startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ","))) + startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.") + startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") + startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") + startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") + startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") + startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.") + startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") +} + +// initKubernetesFlags inits the commandline flags for kubernetes related options +func initKubernetesFlags() { + startCmd.Flags().String(kubernetesVersion, "", fmt.Sprintf("The kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for %s, 'latest' for %s). 
Defaults to 'stable'.", constants.DefaultKubernetesVersion, constants.NewestKubernetesVersion)) + startCmd.Flags().Var(&config.ExtraOptions, "extra-config", + `A set of key=value pairs that describe configuration that may be passed to different components. + The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. + Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler + Valid kubeadm parameters: `+fmt.Sprintf("%s, %s", strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], ", "), strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], ","))) + startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.") + startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster") + startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port") + startCmd.Flags().String(apiServerName, constants.APIServerName, "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine") + startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine") + startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine") +} + +// initDriverFlags inits the commandline flags for vm drivers +func initDriverFlags() { + startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers())) + startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.") + startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors") + startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers") + + // kvm2 + startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)") + startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (kvm2 driver only)") + startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube") + startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)") + + // virtualbox + startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (virtualbox driver only)") + startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox driver only)") + startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)") + startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)") + startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") + startCmd.Flags().String(natNicType, "virtio", "NIC Type used for host only network. 
One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") + + // hyperkit + startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)") + startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (hyperkit driver only)") + startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)") + startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (hyperkit driver only)") + startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)") + + // hyperv + startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)") + startCmd.Flags().Bool(hypervUseExternalSwitch, false, "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)") + startCmd.Flags().String(hypervExternalAdapter, "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)") +} + +// initNetworkingFlags inits the commandline flags for connectivity related flags for start +func initNetworkingFlags() { + startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.") + startCmd.Flags().StringSliceVar(®istryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon") + startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers") + startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.") + startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.") + startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)") + startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. 
(format: key=value)") +} + +// ClusterFlagValue returns the current cluster name based on flags +func ClusterFlagValue() string { + return viper.GetString(config.ProfileName) +} + +// generateClusterConfig generate a config.ClusterConfig based on flags or existing cluster config +func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) { + cc := config.ClusterConfig{} + if existing != nil { // create profile config first time + cc = updateExistingConfigFromFlags(cmd, existing) + } else { + glog.Info("no existing cluster config was found, will generate one from the flags ") + sysLimit, containerLimit, err := memoryLimits(drvName) + if err != nil { + glog.Warningf("Unable to query memory limits: %v", err) + } + + mem := suggestMemoryAllocation(sysLimit, containerLimit) + if cmd.Flags().Changed(memory) { + mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory)) + if err != nil { + exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err}) + } + + } else { + glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit) + } + + diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)) + if err != nil { + exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err}) + } + + r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) + if err != nil { + return cc, config.Node{}, errors.Wrap(err, "new runtime manager") + } + + if cmd.Flags().Changed(imageRepository) { + cc.KubernetesConfig.ImageRepository = viper.GetString(imageRepository) + } + + // Pick good default values for --network-plugin and --enable-default-cni based on runtime. + selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI) + selectedNetworkPlugin := viper.GetString(networkPlugin) + if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) { + selectedNetworkPlugin = "cni" + if !cmd.Flags().Changed(enableDefaultCNI) { + selectedEnableDefaultCNI = true + } + } + + repository := viper.GetString(imageRepository) + mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry)) + if strings.ToLower(repository) == "auto" || mirrorCountry != "" { + found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))) + if err != nil { + exit.WithError("Failed to check main repository and mirrors for images for images", err) + } + + if !found { + if autoSelectedRepository == "" { + exit.WithCodeT(exit.Failure, "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag") + } else { + out.WarningT("None of the known repositories in your location are accessible. 
Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository}) + } + } + + repository = autoSelectedRepository + } + + if cmd.Flags().Changed(imageRepository) { + out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository}) + } + + cc = config.ClusterConfig{ + Name: ClusterFlagValue(), + KeepContext: viper.GetBool(keepContext), + EmbedCerts: viper.GetBool(embedCerts), + MinikubeISO: viper.GetString(isoURL), + Memory: mem, + CPUs: viper.GetInt(cpus), + DiskSize: diskSize, + Driver: drvName, + HyperkitVpnKitSock: viper.GetString(vpnkitSock), + HyperkitVSockPorts: viper.GetStringSlice(vsockPorts), + NFSShare: viper.GetStringSlice(nfsShare), + NFSSharesRoot: viper.GetString(nfsSharesRoot), + DockerEnv: config.DockerEnv, + DockerOpt: config.DockerOpt, + InsecureRegistry: insecureRegistry, + RegistryMirror: registryMirror, + HostOnlyCIDR: viper.GetString(hostOnlyCIDR), + HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch), + HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch), + HypervExternalAdapter: viper.GetString(hypervExternalAdapter), + KVMNetwork: viper.GetString(kvmNetwork), + KVMQemuURI: viper.GetString(kvmQemuURI), + KVMGPU: viper.GetBool(kvmGPU), + KVMHidden: viper.GetBool(kvmHidden), + DisableDriverMounts: viper.GetBool(disableDriverMounts), + UUID: viper.GetString(uuid), + NoVTXCheck: viper.GetBool(noVTXCheck), + DNSProxy: viper.GetBool(dnsProxy), + HostDNSResolver: viper.GetBool(hostDNSResolver), + HostOnlyNicType: viper.GetString(hostOnlyNicType), + NatNicType: viper.GetString(natNicType), + KubernetesConfig: config.KubernetesConfig{ + KubernetesVersion: k8sVersion, + ClusterName: ClusterFlagValue(), + APIServerName: viper.GetString(apiServerName), + APIServerNames: apiServerNames, + APIServerIPs: apiServerIPs, + DNSDomain: viper.GetString(dnsDomain), + FeatureGates: viper.GetString(featureGates), + ContainerRuntime: viper.GetString(containerRuntime), + CRISocket: viper.GetString(criSocket), + NetworkPlugin: selectedNetworkPlugin, + ServiceCIDR: viper.GetString(serviceCIDR), + ImageRepository: repository, + ExtraOptions: config.ExtraOptions, + ShouldLoadCachedImages: viper.GetBool(cacheImages), + EnableDefaultCNI: selectedEnableDefaultCNI, + NodePort: viper.GetInt(apiServerPort), + }, + } + cc.VerifyComponents = interpretWaitFlag(*cmd) + } + + r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime}) + if err != nil { + return cc, config.Node{}, errors.Wrap(err, "new runtime manager") + } + + // Feed Docker our host proxy environment by default, so that it can pull images + // doing this for both new config and existing, in case proxy changed since previous start + if _, ok := r.(*cruntime.Docker); ok && !cmd.Flags().Changed("docker-env") { + setDockerProxy() + } + + var kubeNodeName string + if driver.BareMetal(cc.Driver) { + kubeNodeName = "m01" + } + return createNode(cc, kubeNodeName) +} + +// updateExistingConfigFromFlags will update the existing config from the flags - used on a second start +// skipping updating existing docker env , docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips +func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to supress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30) + validateFlags(cmd, existing.Driver) + + if cmd.Flags().Changed(containerRuntime) { + existing.KubernetesConfig.ContainerRuntime = 
viper.GetString(containerRuntime)
+	}
+
+	if cmd.Flags().Changed(keepContext) {
+		existing.KeepContext = viper.GetBool(keepContext)
+	}
+
+	if cmd.Flags().Changed(embedCerts) {
+		existing.EmbedCerts = viper.GetBool(embedCerts)
+	}
+
+	if cmd.Flags().Changed(isoURL) {
+		existing.MinikubeISO = viper.GetString(isoURL)
+	}
+
+	if cmd.Flags().Changed(memory) {
+		memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
+		if err != nil {
+			glog.Warningf("error calculating memory size in MB: %v", err)
+		}
+		if memInMB != existing.Memory {
+			out.WarningT("You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.")
+		}
+
+	}
+
+	if cmd.Flags().Changed(cpus) {
+		if viper.GetInt(cpus) != existing.CPUs {
+			out.WarningT("You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.")
+		}
+	}
+
+	if cmd.Flags().Changed(humanReadableDiskSize) {
+		memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
+		if err != nil {
+			glog.Warningf("error calculating disk size in MB: %v", err)
+		}
+
+		if memInMB != existing.DiskSize {
+			out.WarningT("You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.")
+		}
+	}
+
+	if cmd.Flags().Changed(vpnkitSock) {
+		existing.HyperkitVpnKitSock = viper.GetString(vpnkitSock)
+	}
+
+	if cmd.Flags().Changed(vsockPorts) {
+		existing.HyperkitVSockPorts = viper.GetStringSlice(vsockPorts)
+	}
+
+	if cmd.Flags().Changed(nfsShare) {
+		existing.NFSShare = viper.GetStringSlice(nfsShare)
+	}
+
+	if cmd.Flags().Changed(nfsSharesRoot) {
+		existing.NFSSharesRoot = viper.GetString(nfsSharesRoot)
+	}
+
+	if cmd.Flags().Changed(hostOnlyCIDR) {
+		existing.HostOnlyCIDR = viper.GetString(hostOnlyCIDR)
+	}
+
+	if cmd.Flags().Changed(hypervVirtualSwitch) {
+		existing.HypervVirtualSwitch = viper.GetString(hypervVirtualSwitch)
+	}
+
+	if cmd.Flags().Changed(hypervUseExternalSwitch) {
+		existing.HypervUseExternalSwitch = viper.GetBool(hypervUseExternalSwitch)
+	}
+
+	if cmd.Flags().Changed(hypervExternalAdapter) {
+		existing.HypervExternalAdapter = viper.GetString(hypervExternalAdapter)
+	}
+
+	if cmd.Flags().Changed(kvmNetwork) {
+		existing.KVMNetwork = viper.GetString(kvmNetwork)
+	}
+
+	if cmd.Flags().Changed(kvmQemuURI) {
+		existing.KVMQemuURI = viper.GetString(kvmQemuURI)
+	}
+
+	if cmd.Flags().Changed(kvmGPU) {
+		existing.KVMGPU = viper.GetBool(kvmGPU)
+	}
+
+	if cmd.Flags().Changed(kvmHidden) {
+		existing.KVMHidden = viper.GetBool(kvmHidden)
+	}
+
+	if cmd.Flags().Changed(disableDriverMounts) {
+		existing.DisableDriverMounts = viper.GetBool(disableDriverMounts)
+	}
+
+	if cmd.Flags().Changed(uuid) {
+		existing.UUID = viper.GetString(uuid)
+	}
+
+	if cmd.Flags().Changed(noVTXCheck) {
+		existing.NoVTXCheck = viper.GetBool(noVTXCheck)
+	}
+
+	if cmd.Flags().Changed(dnsProxy) {
+		existing.DNSProxy = viper.GetBool(dnsProxy)
+	}
+
+	if cmd.Flags().Changed(hostDNSResolver) {
+		existing.HostDNSResolver = viper.GetBool(hostDNSResolver)
+	}
+
+	if cmd.Flags().Changed(hostOnlyNicType) {
+		existing.HostOnlyNicType = viper.GetString(hostOnlyNicType)
+	}
+
+	if cmd.Flags().Changed(natNicType) {
+		existing.NatNicType = viper.GetString(natNicType)
+	}
+
+	if cmd.Flags().Changed(kubernetesVersion) {
+		existing.KubernetesConfig.KubernetesVersion = viper.GetString(kubernetesVersion)
+	}
+
+	if cmd.Flags().Changed(apiServerName) {
+		existing.KubernetesConfig.APIServerName = viper.GetString(apiServerName)
+	}
+
+	if cmd.Flags().Changed("apiserver-names") {
+		
existing.KubernetesConfig.APIServerNames = viper.GetStringSlice("apiserver-names") + } + + if cmd.Flags().Changed(apiServerPort) { + existing.KubernetesConfig.NodePort = viper.GetInt(apiServerPort) + } + + // pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated. + // in minikube config there were two fields for api server port. + // one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port + // this makes sure api server port not be set as 0! + if existing.KubernetesConfig.NodePort == 0 { + existing.KubernetesConfig.NodePort = viper.GetInt(apiServerPort) + } + + if cmd.Flags().Changed(dnsDomain) { + existing.KubernetesConfig.DNSDomain = viper.GetString(dnsDomain) + } + + if cmd.Flags().Changed(featureGates) { + existing.KubernetesConfig.FeatureGates = viper.GetString(featureGates) + } + + if cmd.Flags().Changed(containerRuntime) { + existing.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime) + } + + if cmd.Flags().Changed(criSocket) { + existing.KubernetesConfig.CRISocket = viper.GetString(criSocket) + } + + if cmd.Flags().Changed(criSocket) { + existing.KubernetesConfig.NetworkPlugin = viper.GetString(criSocket) + } + + if cmd.Flags().Changed(networkPlugin) { + existing.KubernetesConfig.NetworkPlugin = viper.GetString(networkPlugin) + } + + if cmd.Flags().Changed(serviceCIDR) { + existing.KubernetesConfig.ServiceCIDR = viper.GetString(serviceCIDR) + } + + if cmd.Flags().Changed(cacheImages) { + existing.KubernetesConfig.ShouldLoadCachedImages = viper.GetBool(cacheImages) + } + + if cmd.Flags().Changed(imageRepository) { + existing.KubernetesConfig.ImageRepository = viper.GetString(imageRepository) + } + + if cmd.Flags().Changed(enableDefaultCNI) { + existing.KubernetesConfig.EnableDefaultCNI = viper.GetBool(enableDefaultCNI) + } + + if cmd.Flags().Changed(waitComponents) { + existing.VerifyComponents = interpretWaitFlag(*cmd) + } + + return *existing +} + +// interpretWaitFlag interprets the wait flag and respects the legacy minikube users +// returns map of components to wait for +func interpretWaitFlag(cmd cobra.Command) map[string]bool { + if !cmd.Flags().Changed(waitComponents) { + glog.Infof("Wait components to verify : %+v", kverify.DefaultComponents) + return kverify.DefaultComponents + } + + waitFlags, err := cmd.Flags().GetStringSlice(waitComponents) + if err != nil { + glog.Warningf("Failed to read --wait from flags: %v.\n Moving on will use the default wait components: %+v", err, kverify.DefaultComponents) + return kverify.DefaultComponents + } + + if len(waitFlags) == 1 { + // respecting legacy flag before minikube 1.9.0, wait flag was boolean + if waitFlags[0] == "false" || waitFlags[0] == "none" { + glog.Infof("Waiting for no components: %+v", kverify.NoComponents) + return kverify.NoComponents + } + // respecting legacy flag before minikube 1.9.0, wait flag was boolean + if waitFlags[0] == "true" || waitFlags[0] == "all" { + glog.Infof("Waiting for all components: %+v", kverify.AllComponents) + return kverify.AllComponents + } + } + + waitComponents := kverify.NoComponents + for _, wc := range waitFlags { + seen := false + for _, valid := range kverify.AllComponentsList { + if wc == valid { + waitComponents[wc] = true + seen = true + continue + } + } + if !seen { + glog.Warningf("The value %q is invalid for --wait flag. 
valid options are %q", wc, strings.Join(kverify.AllComponentsList, ",")) + } + } + glog.Infof("Waiting for components: %+v", waitComponents) + return waitComponents +} diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index 23675528b6..9c6dfa93a1 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" ) -func TestGetKuberneterVersion(t *testing.T) { +func TestGetKubernetesVersion(t *testing.T) { var tests = []struct { description string expectedVersion string @@ -55,6 +55,16 @@ func TestGetKuberneterVersion(t *testing.T) { paramVersion: "v1.16.0", cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}}, }, + { + description: "kubernetes-version given as 'stable', no config", + expectedVersion: constants.DefaultKubernetesVersion, + paramVersion: "stable", + }, + { + description: "kubernetes-version given as 'latest', no config", + expectedVersion: constants.NewestKubernetesVersion, + paramVersion: "latest", + }, } for _, test := range tests { @@ -70,6 +80,50 @@ func TestGetKuberneterVersion(t *testing.T) { } } +func TestMirrorCountry(t *testing.T) { + // Set default disk size value in lieu of flag init + viper.SetDefault(humanReadableDiskSize, defaultDiskSize) + + k8sVersion := constants.DefaultKubernetesVersion + var tests = []struct { + description string + k8sVersion string + imageRepository string + mirrorCountry string + cfg *cfg.ClusterConfig + }{ + { + description: "image-repository none, image-mirror-country none", + imageRepository: "", + mirrorCountry: "", + }, + { + description: "image-repository auto, image-mirror-country none", + imageRepository: "auto", + mirrorCountry: "", + }, + { + description: "image-repository auto, image-mirror-country china", + imageRepository: "auto", + mirrorCountry: "cn", + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + cmd := &cobra.Command{} + viper.SetDefault(imageRepository, test.imageRepository) + viper.SetDefault(imageMirrorCountry, test.mirrorCountry) + config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none") + if err != nil { + t.Fatalf("Got unexpected error %v during config generation", err) + } + // the result can still be "", but anyway + _ = config.KubernetesConfig.ImageRepository + }) + } +} + func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) { // Set default disk size value in lieu of flag init viper.SetDefault(humanReadableDiskSize, defaultDiskSize) @@ -112,7 +166,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) { if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil { t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err) } - config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none") + config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none") if err != nil { t.Fatalf("Got unexpected error %v during config generation", err) } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 5d97709ca0..a95c7978cb 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -29,16 +29,14 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" 
"k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/mustload" ) var statusFormat string @@ -56,24 +54,35 @@ const ( // Nonexistent means nonexistent Nonexistent = "Nonexistent" // ~state.None + // Irrelevant is used for statuses that aren't meaningful for worker nodes + Irrelevant = "Irrelevant" ) // Status holds string representations of component states type Status struct { + Name string Host string Kubelet string APIServer string Kubeconfig string + Worker bool } const ( minikubeNotRunningStatusFlag = 1 << 0 clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 - defaultStatusFormat = `host: {{.Host}} + defaultStatusFormat = `{{.Name}} +host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} + +` + workerStatusFormat = `{{.Name}} +host: {{.Host}} +kubelet: {{.Kubelet}} + ` ) @@ -90,47 +99,39 @@ var statusCmd = &cobra.Command{ exit.UsageT("Cannot use both --output and --format options") } - api, err := machine.NewAPIClient() - if err != nil { - exit.WithCodeT(exit.Unavailable, "Error getting client: {{.error}}", out.V{"error": err}) - } - defer api.Close() + cname := ClusterFlagValue() + api, cc := mustload.Partial(cname) - cc, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - if config.IsNotExist(err) { - exit.WithCodeT(exitCode(&Status{}), `The "{{.name}}" cluster does not exist!`, out.V{"name": viper.GetString(config.ProfileName)}) + var st *Status + var err error + for _, n := range cc.Nodes { + glog.Infof("checking status of %s ...", n.Name) + machineName := driver.MachineName(*cc, n) + st, err = status(api, *cc, n) + glog.Infof("%s status: %+v", machineName, st) + + if err != nil { + glog.Errorf("status error: %v", err) } - exit.WithError("getting config", err) - } - - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - exit.WithError("getting primary control plane", err) - } - - machineName := driver.MachineName(*cc, cp) - st, err := status(api, machineName) - if err != nil { - glog.Errorf("status error: %v", err) - } - if st.Host == Nonexistent { - glog.Errorf("The %q cluster does not exist!", machineName) - } - - switch strings.ToLower(output) { - case "text": - if err := statusText(st, os.Stdout); err != nil { - exit.WithError("status text failure", err) + if st.Host == Nonexistent { + glog.Errorf("The %q host does not exist!", machineName) } - case "json": - if err := statusJSON(st, os.Stdout); err != nil { - exit.WithError("status json failure", err) + + switch strings.ToLower(output) { + case "text": + if err := statusText(st, os.Stdout); err != nil { + exit.WithError("status text failure", err) + } + case "json": + if err := statusJSON(st, os.Stdout); err != nil { + exit.WithError("status json failure", err) + } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) } - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) } + // TODO: Update for multi-node os.Exit(exitCode(st)) }, } @@ -140,21 +141,27 @@ func exitCode(st *Status) int { if st.Host != state.Running.String() { c |= minikubeNotRunningStatusFlag } - if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() { + if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() { c |= clusterNotRunningStatusFlag } - if st.Kubeconfig != Configured { + if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant { c |= k8sNotRunningStatusFlag } return c } -func status(api libmachine.API, name string) (*Status, error) { +func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { + + controlPlane := n.ControlPlane + name := driver.MachineName(cc, n) + st := &Status{ + Name: name, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, Kubeconfig: Nonexistent, + Worker: !controlPlane, } hs, err := machine.Status(api, name) @@ -179,24 +186,16 @@ func status(api libmachine.API, name string) (*Status, error) { } // We have a fully operational host, now we can check for details - ip, err := cluster.GetHostDriverIP(api, name) - if err != nil { - glog.Errorln("Error host driver ip status:", err) - st.APIServer = state.Error.String() + if _, err := cluster.GetHostDriverIP(api, name); err != nil { + glog.Errorf("failed to get driver ip: %v", err) + st.Host = state.Error.String() return st, err } - port, err := kubeconfig.Port(name) - if err != nil { - glog.Warningf("unable to get port: %v", err) - port = constants.APIServerPort - } - - st.Kubeconfig = Misconfigured - ok, err := kubeconfig.IsClusterInConfig(ip, name) - glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) - if ok { - st.Kubeconfig = Configured + st.Kubeconfig = Configured + if !controlPlane { + st.Kubeconfig = Irrelevant + st.APIServer = Irrelevant } host, err := machine.LoadHost(api, name) @@ -209,17 +208,28 @@ func status(api libmachine.API, name string) (*Status, error) { return st, err } - stk, err := kverify.KubeletStatus(cr) - glog.Infof("%s kubelet status = %s (err=%v)", name, stk, err) + stk := kverify.KubeletStatus(cr) + glog.Infof("%s kubelet status = %s", name, stk) + st.Kubelet = stk.String() - if err != nil { - glog.Warningf("kubelet err: %v", err) - st.Kubelet = state.Error.String() - } else { - st.Kubelet = stk.String() + // Early exit for regular nodes + if !controlPlane { + return st, nil } - sta, err := kverify.APIServerStatus(cr, ip, port) + hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, host.DriverName) + if err != nil { + glog.Errorf("forwarded endpoint: %v", err) + st.Kubeconfig = Misconfigured + } else { + err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) + if err != nil { + glog.Errorf("kubeconfig endpoint: %v", err) + st.Kubeconfig = Misconfigured + } + } + + sta, err := kverify.APIServerStatus(cr, hostname, port) glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) if err != nil { @@ -242,6 +252,9 @@ For the list accessible variables for the template, see the struct values here: func statusText(st *Status, w io.Writer) error { tmpl, err := template.New("status").Parse(statusFormat) + if st.Worker && statusFormat == defaultStatusFormat { + tmpl, err = template.New("worker-status").Parse(workerStatusFormat) + } if err != nil { return err } diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index ef414631f8..b11e549a6d 100644 --- 
a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n", }, { name: "paused", - state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, - want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured}, + want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n", }, { name: "down", - state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, - want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured}, + want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index e6d131c7e4..a0b90f878b 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -24,13 +24,12 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" - pkg_config "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/retry" ) @@ -46,17 +45,10 @@ itself, leaving all files intact. 
The cluster can be started again with the "sta // runStop handles the executes the flow of "minikube stop" func runStop(cmd *cobra.Command, args []string) { - profile := viper.GetString(pkg_config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() + cname := ClusterFlagValue() - cc, err := config.Load(profile) - if err != nil { - exit.WithError("Error getting cluster config", err) - } + api, cc := mustload.Partial(cname) + defer api.Close() for _, n := range cc.Nodes { nonexistent := stop(api, *cc, n) @@ -67,19 +59,19 @@ func runStop(cmd *cobra.Command, args []string) { } if err := killMountProcess(); err != nil { - out.T(out.Warning, "Unable to kill mount process: {{.error}}", out.V{"error": err}) + out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err}) } - err = kubeconfig.UnsetCurrentContext(profile, kubeconfig.PathFromEnv()) - if err != nil { + if err := kubeconfig.UnsetCurrentContext(cname, kubeconfig.PathFromEnv()); err != nil { exit.WithError("update config", err) } } func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { nonexistent := false - stop := func() (err error) { - machineName := driver.MachineName(cluster, n) + machineName := driver.MachineName(cluster, n) + + tryStop := func() (err error) { err = machine.StopHost(api, machineName) if err == nil { return nil @@ -88,7 +80,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool switch err := errors.Cause(err).(type) { case mcnerror.ErrHostDoesNotExist: - out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": cluster}) + out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName}) nonexistent = true return nil default: @@ -96,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool } } - if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { + if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil { exit.WithError("Unable to stop VM", err) } diff --git a/cmd/minikube/cmd/tunnel.go b/cmd/minikube/cmd/tunnel.go index 493fc98797..dbf66cf110 100644 --- a/cmd/minikube/cmd/tunnel.go +++ b/cmd/minikube/cmd/tunnel.go @@ -27,13 +27,12 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" - "github.com/spf13/viper" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/minikube/tunnel" "k8s.io/minikube/pkg/minikube/tunnel/kic" @@ -51,6 +50,8 @@ var tunnelCmd = &cobra.Command{ }, Run: func(cmd *cobra.Command, args []string) { manager := tunnel.NewManager() + cname := ClusterFlagValue() + co := mustload.Healthy(cname) if cleanup { glog.Info("Checking for tunnels to cleanup...") @@ -60,13 +61,6 @@ var tunnelCmd = &cobra.Command{ return } - glog.Infof("Creating docker machine client...") - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("error creating machine client", err) - } - glog.Infof("Creating k8s client...") - // Tunnel uses the k8s clientset to query the API server for services in the LoadBalancerEmulator. // We define the tunnel and minikube error free if the API server responds within a second. 
// This also contributes to better UX, the tunnel status check can happen every second and @@ -76,11 +70,6 @@ var tunnelCmd = &cobra.Command{ exit.WithError("error creating clientset", err) } - cfg, err := config.Load(viper.GetString(config.ProfileName)) - if err != nil { - exit.WithError("Error getting config", err) - } - ctrlC := make(chan os.Signal, 1) signal.Notify(ctrlC, os.Interrupt) ctx, cancel := context.WithCancel(context.Background()) @@ -89,13 +78,13 @@ var tunnelCmd = &cobra.Command{ cancel() }() - if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker { - port, err := oci.ForwardedPort(oci.Docker, cfg.Name, 22) + if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker { + port, err := oci.ForwardedPort(oci.Docker, cname, 22) if err != nil { exit.WithError("error getting ssh port", err) } sshPort := strconv.Itoa(port) - sshKey := filepath.Join(localpath.MiniPath(), "machines", cfg.Name, "id_rsa") + sshKey := filepath.Join(localpath.MiniPath(), "machines", cname, "id_rsa") kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1()) err = kicSSHTunnel.Start() @@ -106,7 +95,7 @@ var tunnelCmd = &cobra.Command{ return } - done, err := manager.StartTunnel(ctx, cfg.Name, api, config.DefaultLoader, clientset.CoreV1()) + done, err := manager.StartTunnel(ctx, cname, co.API, config.DefaultLoader, clientset.CoreV1()) if err != nil { exit.WithError("error starting tunnel", err) } diff --git a/cmd/minikube/cmd/unpause.go b/cmd/minikube/cmd/unpause.go index 7549f1b008..e7295b74cd 100644 --- a/cmd/minikube/cmd/unpause.go +++ b/cmd/minikube/cmd/unpause.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "os" "strings" "github.com/golang/glog" @@ -25,11 +24,12 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -38,27 +38,12 @@ var unpauseCmd = &cobra.Command{ Use: "unpause", Short: "unpause Kubernetes", Run: func(cmd *cobra.Command, args []string) { - cname := viper.GetString(config.ProfileName) - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - cc, err := config.Load(cname) + cname := ClusterFlagValue() + co := mustload.Running(cname) - if err != nil && !config.IsNotExist(err) { - exit.WithError("Error loading profile config", err) - } - - if err != nil { - out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname}) - os.Exit(1) - } - glog.Infof("config: %+v", cc) - - for _, n := range cc.Nodes { - machineName := driver.MachineName(*cc, n) - host, err := machine.LoadHost(api, machineName) + for _, n := range co.Config.Nodes { + machineName := driver.MachineName(*co.Config, n) + host, err := machine.LoadHost(co.API, machineName) if err != nil { exit.WithError("Error getting host", err) } @@ -68,7 +53,7 @@ var unpauseCmd = &cobra.Command{ exit.WithError("Failed to get command runner", err) } - cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r}) + cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r}) if err != nil { exit.WithError("Failed runtime", err) } @@ -98,6 +83,6 @@ var unpauseCmd = &cobra.Command{ } func init() { - 
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to unpause") + unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause") unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces") } diff --git a/cmd/minikube/cmd/update-context.go b/cmd/minikube/cmd/update-context.go index 2532672bbc..f2e08f5b6e 100644 --- a/cmd/minikube/cmd/update-context.go +++ b/cmd/minikube/cmd/update-context.go @@ -18,12 +18,9 @@ package cmd import ( "github.com/spf13/cobra" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/cluster" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" - "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" ) @@ -34,24 +31,17 @@ var updateContextCmd = &cobra.Command{ Long: `Retrieves the IP address of the running cluster, checks it with IP in kubeconfig, and corrects kubeconfig if incorrect.`, Run: func(cmd *cobra.Command, args []string) { - api, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Error getting client", err) - } - defer api.Close() - machineName := viper.GetString(config.ProfileName) - ip, err := cluster.GetHostDriverIP(api, machineName) - if err != nil { - exit.WithError("Error host driver ip status", err) - } - updated, err := kubeconfig.UpdateIP(ip, machineName, kubeconfig.PathFromEnv()) + cname := ClusterFlagValue() + co := mustload.Running(cname) + + updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv()) if err != nil { exit.WithError("update config", err) } if updated { - out.T(out.Celebrate, "{{.machine}} IP has been updated to point at {{.ip}}", out.V{"machine": machineName, "ip": ip}) + out.T(out.Celebrate, `"{{.context}}" context has been updated to point to {{.hostname}}:{{.port}}`, out.V{"context": cname, "hostname": co.CP.Hostname, "port": co.CP.Port}) } else { - out.T(out.Meh, "{{.machine}} IP was already correctly configured for {{.ip}}", out.V{"machine": machineName, "ip": ip}) + out.T(out.Meh, `No changes required for the "{{.context}}" context`, out.V{"context": cname}) } }, diff --git a/cmd/minikube/cmd/version.go b/cmd/minikube/cmd/version.go index 00c61efd88..478a7aab92 100644 --- a/cmd/minikube/cmd/version.go +++ b/cmd/minikube/cmd/version.go @@ -17,20 +17,56 @@ limitations under the License. 
package cmd import ( + "encoding/json" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/version" ) +var ( + versionOutput string + shortVersion bool +) + var versionCmd = &cobra.Command{ Use: "version", Short: "Print the version of minikube", Long: `Print the version of minikube.`, Run: func(command *cobra.Command, args []string) { - out.Ln("minikube version: %v", version.GetVersion()) + minikubeVersion := version.GetVersion() gitCommitID := version.GetGitCommitID() - if gitCommitID != "" { - out.Ln("commit: %v", gitCommitID) + data := map[string]string{ + "minikubeVersion": minikubeVersion, + "commit": gitCommitID, + } + switch versionOutput { + case "": + out.Ln("minikube version: %v", minikubeVersion) + if !shortVersion && gitCommitID != "" { + out.Ln("commit: %v", gitCommitID) + } + case "json": + json, err := json.Marshal(data) + if err != nil { + exit.WithError("version json failure", err) + } + out.Ln(string(json)) + case "yaml": + yaml, err := yaml.Marshal(data) + if err != nil { + exit.WithError("version yaml failure", err) + } + out.Ln(string(yaml)) + default: + exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'") } }, } + +func init() { + versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.") + versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.") +} diff --git a/deploy/addons/dashboard/dashboard-dp.yaml b/deploy/addons/dashboard/dashboard-dp.yaml index 98ac68e359..8af7003de4 100644 --- a/deploy/addons/dashboard/dashboard-dp.yaml +++ b/deploy/addons/dashboard/dashboard-dp.yaml @@ -90,7 +90,7 @@ spec: containers: - name: kubernetes-dashboard # WARNING: This must match pkg/minikube/bootstrapper/images/images.go - image: kubernetesui/dashboard:v2.0.0-beta8 + image: kubernetesui/dashboard:v2.0.0-rc6 ports: - containerPort: 9090 protocol: TCP diff --git a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml similarity index 78% rename from deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl rename to deploy/addons/gpu/nvidia-gpu-device-plugin.yaml index 96252d1ba5..e895e5394a 100644 --- a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl +++ b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml @@ -42,26 +42,20 @@ spec: - name: device-plugin hostPath: path: /var/lib/kubelet/device-plugins - - name: dev - hostPath: - path: /dev containers: - - image: "{{default "k8s.gcr.io" .ImageRepository}}/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" - command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"] + - image: "nvidia/k8s-device-plugin:1.0.0-beta4" + command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"] name: nvidia-gpu-device-plugin resources: requests: cpu: 50m memory: 10Mi - limits: - cpu: 50m - memory: 10Mi securityContext: - privileged: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: device-plugin - mountPath: /device-plugin - - name: dev - mountPath: /dev + mountPath: /var/lib/kubelet/device-plugins updateStrategy: type: RollingUpdate diff --git a/deploy/addons/helm-tiller/helm-tiller-dp.tmpl b/deploy/addons/helm-tiller/helm-tiller-dp.tmpl index 49ae46166e..deccc348a3 100644 --- a/deploy/addons/helm-tiller/helm-tiller-dp.tmpl +++ b/deploy/addons/helm-tiller/helm-tiller-dp.tmpl @@ -46,7 +46,7 @@ spec: value: kube-system - name: 
TILLER_HISTORY_MAX value: "0" - image: gcr.io/kubernetes-helm/tiller:v2.16.1 + image: gcr.io/kubernetes-helm/tiller:v2.16.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 3 diff --git a/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl b/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl index 0ac264461f..06b0ba995d 100644 --- a/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl +++ b/deploy/addons/istio-provisioner/istio-operator.yaml.tmpl @@ -11,49 +11,24 @@ metadata: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: istiocontrolplanes.install.istio.io + name: istiooperators.install.istio.io labels: kubernetes.io/minikube-addons: istio - addonmanager.kubernetes.io/mode: EnsureExists + addonmanager.kubernetes.io/mode: EnsureExists spec: group: install.istio.io names: - kind: IstioControlPlane - listKind: IstioControlPlaneList - plural: istiocontrolplanes - singular: istiocontrolplane + kind: IstioOperator + listKind: IstioOperatorList + plural: istiooperators + singular: istiooperator shortNames: - - icp + - iop scope: Namespaced subresources: status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - spec: - description: 'Specification of the desired state of the istio control plane resource. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - type: object - status: - description: 'Status describes each of istio control plane component status at the current time. - 0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING. - More info: https://github.com/istio/operator/blob/master/pkg/apis/istio/v1alpha2/v1alpha2.pb.html & - https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - type: object versions: - - name: v1alpha2 + - name: v1alpha1 served: true storage: true ... @@ -243,9 +218,9 @@ spec: serviceAccountName: istio-operator containers: - name: istio-operator - image: docker.io/istio/operator:1.4.0 + image: docker.io/istio/operator:1.5.0 command: - - istio-operator + - operator - server imagePullPolicy: Always resources: @@ -257,7 +232,7 @@ spec: memory: 128Mi env: - name: WATCH_NAMESPACE - value: "" + value: "istio-system" - name: LEADER_ELECTION_NAMESPACE valueFrom: fieldRef: diff --git a/deploy/addons/istio/README.md b/deploy/addons/istio/README.md index 0cc971bcf5..306948b876 100644 --- a/deploy/addons/istio/README.md +++ b/deploy/addons/istio/README.md @@ -3,9 +3,10 @@ ### Enable istio on minikube Make sure to start minikube with at least 8192 MB of memory and 4 CPUs. +See official [Platform Setup](https://istio.io/docs/setup/platform-setup/) documentation. 
```shell script -minikube start --memory=8000mb --cpus=4 +minikube start --memory=8192mb --cpus=4 ``` To enable this addon, simply run: diff --git a/deploy/addons/istio/istio-default-profile.yaml.tmpl b/deploy/addons/istio/istio-default-profile.yaml.tmpl index 036c6f31dd..6f4ade1271 100644 --- a/deploy/addons/istio/istio-default-profile.yaml.tmpl +++ b/deploy/addons/istio/istio-default-profile.yaml.tmpl @@ -1,10 +1,19 @@ -apiVersion: install.istio.io/v1alpha2 -kind: IstioControlPlane +apiVersion: v1 +kind: Namespace metadata: - namespace: istio-operator + name: istio-system + labels: + kubernetes.io/minikube-addons: istio + addonmanager.kubernetes.io/mode: EnsureExists + +--- +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + namespace: istio-system name: example-istiocontrolplane labels: kubernetes.io/minikube-addons: istio - addonmanager.kubernetes.io/mode: Reconcile + addonmanager.kubernetes.io/mode: Reconcile spec: profile: default diff --git a/deploy/addons/registry-aliases/README.md b/deploy/addons/registry-aliases/README.md new file mode 100644 index 0000000000..1cebc54759 --- /dev/null +++ b/deploy/addons/registry-aliases/README.md @@ -0,0 +1,149 @@ +# Minikube Registry Aliases Addon + +An addon for minikube that helps you push to and pull from the minikube registry using custom domain names. The custom domain names are made resolvable from within the cluster and on the minikube node. + +## How to use? + +### Start minikube + +```shell +minikube start -p demo +``` +This addon depends on the `registry` addon, which needs to be enabled before the alias addon is installed: + +### Enable internal registry + +```shell +minikube addons enable registry +``` + +Verify the registry deployment: + +```shell +watch kubectl get pods -n kube-system +``` + +```shell +NAME READY STATUS RESTARTS AGE +coredns-6955765f44-kpbzt 1/1 Running 0 16m +coredns-6955765f44-lzlsv 1/1 Running 0 16m +etcd-demo 1/1 Running 0 16m +kube-apiserver-demo 1/1 Running 0 16m +kube-controller-manager-demo 1/1 Running 0 16m +kube-proxy-q8rb9 1/1 Running 0 16m +kube-scheduler-demo 1/1 Running 0 16m +*registry-4k8zs* 1/1 Running 0 40s +registry-proxy-vs8jt 1/1 Running 0 40s +storage-provisioner 1/1 Running 0 16m +``` + +```shell +kubectl get svc -n kube-system +``` + +```shell +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 17m +registry ClusterIP 10.97.247.75 80/TCP 94s +``` + +> +> **NOTE:** +> Please make a note of the CLUSTER-IP of the `registry` service + +### Enable registry aliases addon + +```shell +minikube addons enable registry-aliases +🌟 The 'registry-aliases' addon is enabled +``` + +You can check the minikube VM's `/etc/hosts` file for the registry alias entries: + +```shell +watch minikube ssh -- cat /etc/hosts +``` + +```shell +127.0.0.1 localhost +127.0.1.1 demo +10.97.247.75 example.org +10.97.247.75 example.com +10.97.247.75 test.com +10.97.247.75 test.org +``` + +The above output shows that the DaemonSet has added the `registryAliases` from the ConfigMap, pointing to the internal registry's __CLUSTER-IP__. + +### Update CoreDNS + +CoreDNS is updated automatically by the patch-coredns job.
A successful job run leaves the coredns ConfigMap looking like this: + +```yaml +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health + rewrite name example.com registry.kube-system.svc.cluster.local + rewrite name example.org registry.kube-system.svc.cluster.local + rewrite name test.com registry.kube-system.svc.cluster.local + rewrite name test.org registry.kube-system.svc.cluster.local + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + name: coredns +``` + +To verify it, run the following command: + +```shell +kubectl get cm -n kube-system coredns -o yaml +``` + +Once the patch has been applied successfully, you can push to and pull from the registry using the suffixes `example.com`, `example.org`, `test.com` and `test.org`. + +A successful run also shows the following extra pods (DaemonSet, Job) in the `kube-system` namespace: + +```shell +NAME READY STATUS RESTARTS AGE +registry-aliases-hosts-update-995vx 1/1 Running 0 47s +registry-aliases-patch-core-dns-zsxfc 0/1 Completed 0 47s +``` + +## Verify with sample application + +You can verify the deployment end to end using the example [application](https://github.com/kameshsampath/minikube-registry-aliases-demo). + +```shell +git clone https://github.com/kameshsampath/minikube-registry-aliases-demo +cd minikube-registry-aliases-demo +``` + +Make sure you set the Docker context using `eval $(minikube -p demo docker-env)`. + +Deploy the application using [Skaffold](https://skaffold.dev): + +```shell +skaffold dev --port-forward +``` + +Once the application is running, run `curl localhost:8080` to see the `Hello World` response. + +You can also update [skaffold.yaml](./skaffold.yaml) and [app.yaml](.k8s/app.yaml) to use `test.org`, `test.com` or `example.org` as the container registry URL, and see all of the container image names resolve to the internal registry, resulting in a successful build and deployment.
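
If you want to exercise the aliases without Skaffold, a minimal sketch of the same flow is shown below. It assumes you have a local `Dockerfile` to build and uses the hypothetical image name `example.com/hello-app:1.0`; building directly against the minikube Docker daemon avoids a push, which might otherwise require configuring the daemon to trust the alias as an insecure registry.

```shell
# Build against the minikube Docker daemon of the "demo" profile used above.
eval $(minikube -p demo docker-env)

# "example.com/hello-app:1.0" is a hypothetical name; any of the configured
# aliases (example.org, test.com, test.org) works the same way.
docker build -t example.com/hello-app:1.0 .

# Reference the image by its alias in a Deployment. Because the image was
# built into the node's Docker daemon, it is found locally (imagePullPolicy
# defaults to IfNotPresent for a fixed tag); if it did need to be pulled,
# the alias would resolve to the in-cluster registry.
kubectl create deployment hello-app --image=example.com/hello-app:1.0
kubectl rollout status deployment/hello-app
```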
diff --git a/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl b/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl new file mode 100644 index 0000000000..0ef938876b --- /dev/null +++ b/deploy/addons/registry-aliases/node-etc-hosts-update.tmpl @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: registry-aliases-hosts-update + namespace: kube-system + labels: + kubernetes.io/minikube-addons: registry-aliases + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + app: registry-aliases-hosts-update + template: + metadata: + labels: + app: registry-aliases-hosts-update + spec: + initContainers: + - name: update + image: registry.fedoraproject.org/fedora + volumeMounts: + - name: etchosts + mountPath: /host-etc/hosts + readOnly: false + env: + - name: REGISTRY_ALIASES + valueFrom: + configMapKeyRef: + name: registry-aliases + key: registryAliases + command: + - bash + - -ce + - | + NL=$'\n' + TAB=$'\t' + HOSTS="$(cat /host-etc/hosts)" + [ -z "$REGISTRY_SERVICE_HOST" ] && echo "Failed to get hosts entry for default registry" && exit 1; + for H in $REGISTRY_ALIASES; do + echo "$HOSTS" | grep "$H" || HOSTS="$HOSTS$NL$REGISTRY_SERVICE_HOST$TAB$H"; + done; + echo "$HOSTS" | diff -u /host-etc/hosts - || echo "$HOSTS" > /host-etc/hosts + echo "Done." + containers: + - name: pause-for-update + image: gcr.io/google_containers/pause-amd64:3.1 + terminationGracePeriodSeconds: 30 + volumes: + - name: etchosts + hostPath: + path: /etc/hosts diff --git a/deploy/addons/registry-aliases/patch-coredns-job.tmpl b/deploy/addons/registry-aliases/patch-coredns-job.tmpl new file mode 100644 index 0000000000..cdda3bc7e5 --- /dev/null +++ b/deploy/addons/registry-aliases/patch-coredns-job.tmpl @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: registry-aliases-patch-core-dns + namespace: kube-system +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + serviceAccountName: registry-aliases-sa + volumes: + - name: minikube + hostPath: + path: /var/lib/minikube/binaries + containers: + - name: core-dns-patcher + image: quay.io/rhdevelopers/core-dns-patcher + imagePullPolicy: IfNotPresent + # using the kubectl from the minikube instance + volumeMounts: + - mountPath: /var/lib/minikube/binaries + name: minikube + readOnly: true + restartPolicy: Never + backoffLimit: 4 \ No newline at end of file diff --git a/deploy/addons/registry-aliases/registry-aliases-config.tmpl b/deploy/addons/registry-aliases/registry-aliases-config.tmpl new file mode 100644 index 0000000000..0dacc4ed9c --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-config.tmpl @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: registry-aliases + namespace: kube-system + labels: + kubernetes.io/minikube-addons: registry-aliases + addonmanager.kubernetes.io/mode: Reconcile +data: + # Add additonal hosts seperated by new-line + registryAliases: >- + example.org + example.com + test.com + test.org + # default registry address in minikube when enabled via minikube addons enable registry + registrySvc: registry.kube-system.svc.cluster.local + diff --git a/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl b/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl new file mode 100644 index 0000000000..1ca1b60cc8 --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: registry-aliases-crb +subjects: +- 
kind: ServiceAccount + name: registry-aliases-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deploy/addons/registry-aliases/registry-aliases-sa.tmpl b/deploy/addons/registry-aliases/registry-aliases-sa.tmpl new file mode 100644 index 0000000000..a40fc37999 --- /dev/null +++ b/deploy/addons/registry-aliases/registry-aliases-sa.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: registry-aliases-sa + namespace: kube-system \ No newline at end of file diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index ebf694f191..9611b6a9ac 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZ4=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -25,10 +26,10 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y -CONFIG_CGROUP_BPF=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_SMP=y @@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BRIDGE=m CONFIG_NET_SCHED=y +CONFIG_NET_SCH_TBF=y CONFIG_NET_SCH_NETEM=y CONFIG_NET_SCH_INGRESS=m CONFIG_NET_CLS_U32=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_IPSET=y CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_BPF=m @@ -380,6 +383,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_INTEL is not set # CONFIG_HW_RANDOM_AMD is not set CONFIG_NVRAM=y @@ -504,3 +508,5 @@ CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash index 28f2852b75..d0f12d3d77 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash @@ -11,3 +11,4 @@ sha256 70d4c746fe207422c78420dc4239768f485eea639a38c993c02872ec6305dd1d v1.15.2. 
sha256 05f9614c4d5970b4662499b84c270b0ab953596ee863dcd09c9dc7a2d2f09789 v1.16.0.tar.gz sha256 57e1ee990ef2d5af8b32c33a21b4998682608e3556dcf1d3349666f55e7d95b9 v1.16.1.tar.gz sha256 23a797762e4544ee7c171ef138cfc1141a3f0acc2838d9965c2a58e53b16c3ae v1.17.0.tar.gz +sha256 7967e9218fdfb59d6005a9e19c1668469bc5566c2a35927cffe7de8656bb22c7 v1.17.1.tar.gz diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk index 0ddf03aeba..f1a58a31b7 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk @@ -4,8 +4,8 @@ # ################################################################################ -CRIO_BIN_VERSION = v1.17.0 -CRIO_BIN_COMMIT = 6d0ffae63b9b7d8f07e7f9cf50736a67fb31faf3 +CRIO_BIN_VERSION = v1.17.1 +CRIO_BIN_COMMIT = ee2de87bd8e2a7a84799476cb4fc4ce8a78fdf6d CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz CRIO_BIN_DEPENDENCIES = host-go libgpgme diff --git a/deploy/iso/minikube-iso/package/falco-probe/falco-probe.hash b/deploy/iso/minikube-iso/package/falco-probe/falco-probe.hash index a8afdf53cc..2a1b798854 100644 --- a/deploy/iso/minikube-iso/package/falco-probe/falco-probe.hash +++ b/deploy/iso/minikube-iso/package/falco-probe/falco-probe.hash @@ -1,5 +1,7 @@ # falco sha256 87c60273c35d544256e471b403497be33f24df662673338236ec92ba3fc1f8b7 0.19.0.tar.gz sha256 b873e3590e56ead740ed905108221f98da6100da3c5b7acf2355ea1cf628d931 0.20.0.tar.gz +sha256 b1c9884855d58be94a97b2e348bcdc7db995800f0405b0f4e9a7176ee2f094a7 0.21.0.tar.gz # sysdig sha256 6e477ac5fe9d3110b870bd4495f01541373a008c375a1934a2d1c46798b6bad6 146a431edf95829ac11bfd9c85ba3ef08789bffe.tar.gz +sha256 1c69363e4c36cdaeed413c2ef557af53bfc4bf1109fbcb6d6e18dc40fe6ddec8 be1ea2d9482d0e6e2cb14a0fd7e08cbecf517f94.tar.gz diff --git a/deploy/iso/minikube-iso/package/falco-probe/falco-probe.mk b/deploy/iso/minikube-iso/package/falco-probe/falco-probe.mk index 0e5986f1fc..888a16535a 100644 --- a/deploy/iso/minikube-iso/package/falco-probe/falco-probe.mk +++ b/deploy/iso/minikube-iso/package/falco-probe/falco-probe.mk @@ -4,7 +4,7 @@ # ######################################################################## -FALCO_PROBE_VERSION = 0.20.0 +FALCO_PROBE_VERSION = 0.21.0 FALCO_PROBE_SITE = https://github.com/falcosecurity/falco/archive FALCO_PROBE_SOURCE = $(FALCO_PROBE_VERSION).tar.gz FALCO_PROBE_DEPENDENCIES += ncurses libyaml @@ -12,7 +12,7 @@ FALCO_PROBE_LICENSE = Apache-2.0 FALCO_PROBE_LICENSE_FILES = COPYING # see cmake/modules/sysdig-repo/CMakeLists.txt -FALCO_PROBE_SYSDIG_VERSION = 146a431edf95829ac11bfd9c85ba3ef08789bffe +FALCO_PROBE_SYSDIG_VERSION = be1ea2d9482d0e6e2cb14a0fd7e08cbecf517f94 FALCO_PROBE_EXTRA_DOWNLOADS = https://github.com/draios/sysdig/archive/${FALCO_PROBE_SYSDIG_VERSION}.tar.gz define FALCO_PROBE_SYSDIG_SRC diff --git a/deploy/iso/minikube-iso/package/podman/podman.hash b/deploy/iso/minikube-iso/package/podman/podman.hash index af242fcc80..fda1df9870 100644 --- a/deploy/iso/minikube-iso/package/podman/podman.hash +++ b/deploy/iso/minikube-iso/package/podman/podman.hash @@ -12,3 +12,4 @@ sha256 2e027c1b935f3a03f27ef7f17823ccf334607a17d033d4ce53a90b98294e7f68 v1.4.4.t sha256 61b44b739c485125f179044f7aa7dc58c820f771bce4ce495fa555a38dc68b57 v1.6.3.tar.gz sha256 6e59821320b435543bc7554e73faa66d5956e4ad3f7e7f4ea03bebd6726758e9 v1.6.4.tar.gz sha256 50960293c2019e38ce69e4cf5f0a683e7fea1562b180e38e38c9355fcd7c4f0d v1.6.5.tar.gz +sha256 
69f7ff81da1510ebf2962c1de3170675ca3cd8a24bc00c93742a24bcce17c752 v1.8.2.tar.gz diff --git a/deploy/iso/minikube-iso/package/podman/podman.mk b/deploy/iso/minikube-iso/package/podman/podman.mk index 911dc30cf3..8781c0b0b9 100644 --- a/deploy/iso/minikube-iso/package/podman/podman.mk +++ b/deploy/iso/minikube-iso/package/podman/podman.mk @@ -1,5 +1,5 @@ -PODMAN_VERSION = v1.6.5 -PODMAN_COMMIT = 45e7be192ef99e870c59a1cd2c1fa7940b0af2d6 +PODMAN_VERSION = v1.8.2 +PODMAN_COMMIT = 028e3317eb1494b9b2acba4a0a295df80fae66cc PODMAN_SITE = https://github.com/containers/libpod/archive PODMAN_SOURCE = $(PODMAN_VERSION).tar.gz PODMAN_LICENSE = Apache-2.0 diff --git a/deploy/minikube/release_sanity_test.go b/deploy/minikube/release_sanity_test.go index 22bb29d9c7..8817606af5 100644 --- a/deploy/minikube/release_sanity_test.go +++ b/deploy/minikube/release_sanity_test.go @@ -26,7 +26,6 @@ import ( "testing" retryablehttp "github.com/hashicorp/go-retryablehttp" - "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/notify" "k8s.io/minikube/pkg/util" ) diff --git a/deploy/minikube/releases.json b/deploy/minikube/releases.json index bc2e1d5181..fa48b6ded3 100644 --- a/deploy/minikube/releases.json +++ b/deploy/minikube/releases.json @@ -1,4 +1,28 @@ [ + { + "name": "v1.9.2", + "checksums": { + "darwin": "f27016246850b3145e1509e98f7ed060fd9575ac4d455c7bdc15277734372e85", + "linux": "3121f933bf8d608befb24628a045ce536658738c14618504ba46c92e656ea6b5", + "windows": "426586f33d88a484fdc5a3b326b0651d57860e9305a4f9d4180640e3beccaf6b" + } + }, + { + "name": "v1.9.1", + "checksums": { + "darwin": "ac8855ea54e798fa6f00e8c251b55c3d2a54e3b80e896162958a5ac7b0e3f60b", + "linux": "7174c881289a7302a05d477c67cc1ef5b48153e825089d6c0d0bcfaebe33d42a", + "windows": "91d15b2ef8f357aa463ae16de59f6e018120398f492ba4e35cd77f21acb27d5c" + } + }, + { + "name": "v1.9.0", + "checksums": { + "darwin": "2a074b0d842e3d9272444990374c6ffc51878c2d11c0434f54e15269b59593f9", + "linux": "81d77d1babe63be393e0a3204aac7825eb35e0fdf58ffefd9f66508a43864866", + "windows": "d11a957704c23670eac453a47897449a2aaab13b7dcd6424307f8932ac9f81bb" + } + }, { "name": "v1.8.2", "checksums": { diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 16e26d9369..0000000000 --- a/docs/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Advanced Topics and Tutorials - -## Cluster Configuration - -* **Alternative Runtimes** ([alternative_runtimes.md](alternative_runtimes.md)): How to run minikube without Docker as the container runtime - -* **Environment Variables** ([env_vars.md](env_vars.md)): The different environment variables that minikube understands - -* **Minikube Addons** ([addons.md](addons.md)): Information on configuring addons to be run on minikube - -* **Configuring Kubernetes** ([configuring_kubernetes.md](configuring_kubernetes.md)): Configuring different Kubernetes components in minikube - -* **Caching Images** ([cache.md](cache.md)): Caching non-minikube images in minikube - -* **GPUs** ([gpu.md](gpu.md)): Using NVIDIA GPUs on minikube - -* **OpenID Connect Authentication** ([openid_connect_auth.md](openid_connect_auth.md)): Using OIDC Authentication on minikube - -### Installation and debugging - -* **Driver installation** ([drivers.md](drivers.md)): In depth instructions for installing the various hypervisor drivers - -* **Debugging minikube** ([debugging.md](debugging.md)): General practices for debugging the minikube binary itself - -### Developing on the minikube cluster - -* **Reusing the Docker Daemon** 
([reusing_the_docker_daemon.md](reusing_the_docker_daemon.md)): How to point your docker CLI to the docker daemon running inside minikube - -* **Building images within the VM** ([building_images_within_the_vm.md](building_images_within_the_vm.md)): How to build a container image within the minikube VM - -#### Storage - -* **Persistent Volumes** ([persistent_volumes.md](persistent_volumes.md)): Persistent Volumes in Minikube and persisted locations in the VM - -* **Host Folder Mounting** ([host_folder_mount.md](host_folder_mount.md)): How to mount your files from your host into the minikube VM - -* **Syncing files into the VM** ([syncing-files.md](syncing-files.md)): How to sync files from your host into the minikube VM - -#### Networking - -* **HTTP Proxy** ([http_proxy.md](http_proxy.md)): Instruction on how to run minikube behind a HTTP Proxy - -* **Insecure or Private Registries** ([insecure_registry.md](insecure_registry.md)): How to use private or insecure registries with minikube - -* **Accessing etcd from inside the cluster** ([accessing_etcd.md](accessing_etcd.md)) - -* **Networking** ([networking.md](networking.md)): FAQ about networking between the host and minikube VM - -* **Offline** ([offline.md](offline.md)): Details about using minikube offline diff --git a/docs/accessing_etcd.md b/docs/accessing_etcd.md deleted file mode 100644 index b1593333d5..0000000000 --- a/docs/accessing_etcd.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/accessing-host-resources/ \ No newline at end of file diff --git a/docs/addons.md b/docs/addons.md deleted file mode 100644 index ca879ccab8..0000000000 --- a/docs/addons.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/addons/ diff --git a/docs/alternative_runtimes.md b/docs/alternative_runtimes.md deleted file mode 100644 index d0d648734e..0000000000 --- a/docs/alternative_runtimes.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/runtimes/ diff --git a/docs/building_images_within_the_vm.md b/docs/building_images_within_the_vm.md deleted file mode 100644 index 5b22a32031..0000000000 --- a/docs/building_images_within_the_vm.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/building_within/ diff --git a/docs/cache.md b/docs/cache.md deleted file mode 100644 index 475bb7332c..0000000000 --- a/docs/cache.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/caching diff --git a/docs/cli_commands.md b/docs/cli_commands.md deleted file mode 100644 index b547b26a5d..0000000000 --- a/docs/cli_commands.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/commands/ diff --git a/docs/configuring_kubernetes.md b/docs/configuring_kubernetes.md deleted file mode 100644 index cb7db70ae8..0000000000 --- a/docs/configuring_kubernetes.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/ diff --git a/docs/contributors/README.md b/docs/contributors/README.md deleted file mode 100644 index a8952b3c5f..0000000000 --- a/docs/contributors/README.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/ diff --git a/docs/contributors/adding_an_addon.md b/docs/contributors/adding_an_addon.md deleted file mode 100644 index 9fae54108c..0000000000 --- 
a/docs/contributors/adding_an_addon.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/addons/ \ No newline at end of file diff --git a/docs/contributors/adding_driver.md b/docs/contributors/adding_driver.md deleted file mode 100644 index fb3ad4579d..0000000000 --- a/docs/contributors/adding_driver.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/drivers/ diff --git a/docs/contributors/build_guide.md b/docs/contributors/build_guide.md deleted file mode 100644 index dddbe5d7b3..0000000000 --- a/docs/contributors/build_guide.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/ \ No newline at end of file diff --git a/docs/contributors/ci_builds.md b/docs/contributors/ci_builds.md deleted file mode 100644 index 33f7b21f42..0000000000 --- a/docs/contributors/ci_builds.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/ diff --git a/docs/contributors/minikube_iso.md b/docs/contributors/minikube_iso.md deleted file mode 100644 index b9bc175ffa..0000000000 --- a/docs/contributors/minikube_iso.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/iso/ diff --git a/docs/contributors/principles.md b/docs/contributors/principles.md deleted file mode 100644 index 2caffd4f37..0000000000 --- a/docs/contributors/principles.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/concepts/principles/ diff --git a/docs/contributors/releasing_minikube.md b/docs/contributors/releasing_minikube.md deleted file mode 100644 index 4da4199c09..0000000000 --- a/docs/contributors/releasing_minikube.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/releasing/ diff --git a/docs/contributors/roadmap.md b/docs/contributors/roadmap.md deleted file mode 100644 index 45e47a31e2..0000000000 --- a/docs/contributors/roadmap.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/contributing/roadmap/ diff --git a/docs/dashboard.md b/docs/dashboard.md deleted file mode 100644 index cb7ff8f2d8..0000000000 --- a/docs/dashboard.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/dashboard/ diff --git a/docs/debugging.md b/docs/debugging.md deleted file mode 100644 index b087d9c57f..0000000000 --- a/docs/debugging.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/debug/ diff --git a/docs/drivers.md b/docs/drivers.md deleted file mode 100644 index 7b69ddd912..0000000000 --- a/docs/drivers.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/drivers/ diff --git a/docs/env_vars.md b/docs/env_vars.md deleted file mode 100644 index b90a70617b..0000000000 --- a/docs/env_vars.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/environment_variables diff --git a/docs/gpu.md b/docs/gpu.md deleted file mode 100644 index 56725b80fb..0000000000 --- a/docs/gpu.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/ diff --git a/docs/host_folder_mount.md b/docs/host_folder_mount.md deleted file mode 100644 index 689f23f986..0000000000 --- a/docs/host_folder_mount.md +++ /dev/null 
@@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/mount/ diff --git a/docs/http_proxy.md b/docs/http_proxy.md deleted file mode 100644 index 789508a20f..0000000000 --- a/docs/http_proxy.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/proxy/ diff --git a/docs/insecure_registry.md b/docs/insecure_registry.md deleted file mode 100644 index c02296e26c..0000000000 --- a/docs/insecure_registry.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/registry/ diff --git a/docs/networking.md b/docs/networking.md deleted file mode 100644 index cc3ba1e46e..0000000000 --- a/docs/networking.md +++ /dev/null @@ -1,2 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/ - \ No newline at end of file diff --git a/docs/offline.md b/docs/offline.md deleted file mode 100644 index bca6193eec..0000000000 --- a/docs/offline.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/disk_cache/ diff --git a/docs/openid_connect_auth.md b/docs/openid_connect_auth.md deleted file mode 100644 index cbb2515911..0000000000 --- a/docs/openid_connect_auth.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/openid_connect_auth/ diff --git a/docs/persistent_volumes.md b/docs/persistent_volumes.md deleted file mode 100644 index df28b852af..0000000000 --- a/docs/persistent_volumes.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/persistent_volumes/ diff --git a/docs/reusing_the_docker_daemon.md b/docs/reusing_the_docker_daemon.md deleted file mode 100644 index 2b76f9534b..0000000000 --- a/docs/reusing_the_docker_daemon.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/docker_daemon/ diff --git a/docs/syncing-files.md b/docs/syncing-files.md deleted file mode 100644 index ea217b2d38..0000000000 --- a/docs/syncing-files.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/sync/ diff --git a/docs/tunnel.md b/docs/tunnel.md deleted file mode 100644 index b92c4c1b16..0000000000 --- a/docs/tunnel.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/tasks/loadbalancer/ diff --git a/docs/vmdriver-none.md b/docs/vmdriver-none.md deleted file mode 100644 index 143ec71b2b..0000000000 --- a/docs/vmdriver-none.md +++ /dev/null @@ -1 +0,0 @@ -This document has moved to https://minikube.sigs.k8s.io/docs/reference/drivers/none/ diff --git a/enhancements/proposed/20200316-release-schedule/schedule-proposal.md b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md new file mode 100644 index 0000000000..fdcb03ba08 --- /dev/null +++ b/enhancements/proposed/20200316-release-schedule/schedule-proposal.md @@ -0,0 +1,69 @@ +# Release Schedule + +* First proposed: 2020-03-30 +* Authors: Thomas Stromberg (@tstromberg) + +## Reviewer Priorities + +Please review this proposal with the following priorities: + +* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)? +* Are there other approaches to consider? +* Could the implementation be made simpler? +* Are there usability, reliability, or technical debt concerns? + +Please leave the above text in your proposal as instructions to the reader. 
+ +## Summary + +Adding structure to the release process to encourage predictable, stress-free releases with fewer regressions. + +## Goals + +* A decrease in release regressions +* Minimal disruption to development velocity +* Compatible with the upstream Kubernetes release schedule + +## Non-Goals + +* Maintaining release branches + +## Design Details + +minikube currently has 3 types of releases: + +* Feature release (v1.9.0) +* Bugfix release (v1.9.1) +* Beta releases + +This proposal maintains the pre-existing structure, but adds dates for when each step will occur: + +* Day 0: Create milestones for the next regression & feature release +* Day 7: Regression release (optional) +* Day 14: Early Beta release (optional) +* Day 21: Beta release +* Day 24: Feature freeze and optional final beta +* Day 28: Feature release + +To synchronize with the Kubernetes release schedule (Tuesday afternoon PST), minikube releases should be Wednesday morning (PST). To select a final release date, consult [sig-release](https://github.com/kubernetes/sig-release/tree/master/releases) to see if there is an upcoming minor release of Kubernetes within the next 6 weeks. If so, schedule the minikube release to occur within 24 hours of it. + +Even with this schedule, it is assumed that release dates may slip. + +## Alternatives Considered + +### Release branches + +Rather than considering master to always be in a releasable state, we could maintain long-lived release branches. This adds a lot of overhead to the release manager, as they have to manage cherry-picks. + +### Extending cycle by a week + +As this process assumes a regression release at Day 7, it raises the question of whether a 5-week feature release cycle makes more sense: + +* Day 0: Create milestones for the next regression & feature release +* Day 7: Regression release (optional) +* Day 21: Beta release +* Day 28: Beta 2 release +* Day 31: Feature freeze and optional final beta +* Day 35: Feature release + +The downside is a slightly lower release velocity; the upside may be a more stable final release.
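
To make the offsets concrete, the sketch below (illustrative only, not part of the proposal) derives the milestone dates of the proposed 28-day cycle from a chosen feature-release date. It assumes GNU `date`, and the release date shown is an arbitrary example.

```shell
# Derive milestone dates from the Day 28 feature-release date, using the
# Day 0/7/14/21/24/28 offsets from the Design Details above.
RELEASE="2020-04-29"   # example target date; substitute your own

milestone() { date -d "$RELEASE -$((28 - $1)) days" "+%Y-%m-%d  $2"; }

milestone 0  "create milestones"
milestone 7  "regression release (optional)"
milestone 14 "early beta release (optional)"
milestone 21 "beta release"
milestone 24 "feature freeze, optional final beta"
milestone 28 "feature release"
```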
diff --git a/go.mod b/go.mod index 6777081f84..62c03dbf00 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect github.com/docker/docker v1.13.1 github.com/docker/go-units v0.4.0 - github.com/docker/machine v0.7.1-0.20190718054102-a555e4f7a8f5 // version is 0.7.1 to pin to a555e4f7a8f5 + github.com/docker/machine v0.7.1-0.20190902101342-b170508bf44c // v0.16.2^ github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect github.com/evanphx/json-patch v4.5.0+incompatible // indirect @@ -25,8 +25,10 @@ require ( github.com/gogo/protobuf v1.3.1 // indirect github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/google/go-cmp v0.3.1 + github.com/google/go-cmp v0.3.2-0.20191028172631-481baca67f93 github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 + github.com/google/go-github v17.0.0+incompatible + github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible // indirect github.com/googleapis/gnostic v0.3.0 // indirect github.com/hashicorp/go-getter v1.4.0 github.com/hashicorp/go-retryablehttp v0.5.4 @@ -34,7 +36,7 @@ require ( github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8 github.com/imdario/mergo v0.3.8 // indirect github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect - github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 + github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect @@ -44,17 +46,19 @@ require ( github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/kr/pretty v0.2.0 // indirect github.com/libvirt/libvirt-go v3.4.0+incompatible github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 - github.com/mattn/go-isatty v0.0.9 + github.com/mattn/go-isatty v0.0.11 github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd - github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 + github.com/olekukonko/tablewriter v0.0.4 github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect github.com/opencontainers/go-digest v1.0.0-rc1 github.com/otiai10/copy v1.0.2 github.com/pborman/uuid v1.2.0 + github.com/pelletier/go-toml v1.6.0 // indirect github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pkg/browser v0.0.0-20160118053552-9302be274faa github.com/pkg/errors v0.9.1 @@ -62,11 +66,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 github.com/prometheus/client_golang v1.1.0 // indirect github.com/prometheus/procfs v0.0.5 // indirect + github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 // indirect github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect github.com/shirou/gopsutil v2.18.12+incompatible + github.com/spf13/cast v1.3.1 // indirect github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.3.2 + github.com/spf13/viper v1.6.1 github.com/xeipuuv/gojsonpointer 
v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f @@ -80,7 +86,10 @@ require ( google.golang.org/api v0.9.0 google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 // indirect google.golang.org/grpc v1.26.0 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/ini.v1 v1.51.1 // indirect gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect + gopkg.in/yaml.v2 v2.2.8 gotest.tools/v3 v3.0.2 // indirect k8s.io/api v0.17.3 k8s.io/apimachinery v0.17.3 @@ -94,7 +103,7 @@ require ( replace ( git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 github.com/docker/docker => github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 - github.com/docker/machine => github.com/medyagh/machine v0.16.4 + github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20200323212942-41eb826190d8 github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c github.com/samalba/dockerclient => github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 k8s.io/api => k8s.io/api v0.17.3 diff --git a/go.sum b/go.sum index 0cd403ed47..d9235acee9 100644 --- a/go.sum +++ b/go.sum @@ -43,14 +43,17 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tT github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d h1:u64+IetywsPQ0gJ/4cXBJ/KiXV9xTKRMoaCOzW9PI3g= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/Parallels/docker-machine-parallels v1.3.0 h1:RG1fyf3v1GwXMCeHRiZkB4tL9phFZEv6ixcvRZ1raN8= github.com/Parallels/docker-machine-parallels v1.3.0/go.mod h1:HCOMm3Hulq/xuEVQMyZOuQlA+dSZpFY5kdCTZWjMVis= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -61,13 +64,16 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c h1:18gEt7qzn7CW7qMkfPTFyyotlPbvPQo9o4IDV8jZqP4= github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c/go.mod 
h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= @@ -93,6 +99,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 h1:+ziP/wVJWuAORkjv7386TRidVKY57X0bXBZFMeFlW+U= @@ -103,6 +110,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM= github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= @@ -132,6 +141,7 @@ github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod 
h1:Cm3kw github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/coredns/corefile-migration v1.0.4/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -143,6 +153,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -152,8 +163,10 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd h1:uVsMphB1eRx7xB1njzL3fuMdWRN8HtVzoUOItHMwv5c= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -177,6 +190,7 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f h1:8GDPb0tCY8LQ+OJ3dbHb5sA6YZWXFORQYZx5sdsTlMs= @@ -193,7 +207,9 @@ github.com/evanphx/json-patch 
v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= @@ -202,6 +218,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -229,11 +246,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -247,6 +266,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec 
v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -256,10 +276,12 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= @@ -275,6 +297,7 @@ github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoM github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -291,6 +314,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= @@ -324,13 +349,14 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.2-0.20191028172631-481baca67f93 h1:VvBteXw2zOXEgm0o3PgONTWf+bhUGsCaiNn3pbkU9LA= +github.com/google/go-cmp v0.3.2-0.20191028172631-481baca67f93/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2 h1:/z0FoA29APs30PljxT6GoZQekF5c1cYhow2osFsj1XU= github.com/google/go-containerregistry v0.0.0-20200131185320-aec8da010de2/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= @@ -342,6 +368,8 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -358,6 +386,7 @@ github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -367,10 +396,13 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= 
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -391,7 +423,9 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible h1:ysqc8k973k1lLJ4BOOHAkx14K2nt4cLjsIm+hwWDZDE= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6VxG31lg0d6XOURNA09BTtM4fY= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 h1:WgfvpuKg42WVLkxNwzfFraXkTXPK36bMqXvMFN67clI= github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214/go.mod h1:kj6hFWqfwSjFjLnYW5PK1DoxZ4O0uapwHRmd9jhln4E= @@ -414,8 +448,9 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= +github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -424,6 +459,7 @@ github.com/json-iterator/go v1.1.7/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A= github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= @@ -460,6 +496,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -469,7 +507,9 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libvirt/libvirt-go v3.4.0+incompatible h1:Cpyalgj1x8JIeTlL6SDYZBo7j8nY3+5XHqmi8DaunCk= github.com/libvirt/libvirt-go v3.4.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= @@ -479,6 +519,8 @@ github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H7 github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 h1:+E1IKKk+6kaQrCPg6edJZ/zISZijuZTPnzy6RE4C/Ho= github.com/machine-drivers/docker-machine-driver-vmware v0.1.1/go.mod h1:ej014C83EmSnxJeJ8PtVb8OLJ91PJKO1Q8Y7sM5CK0o= +github.com/machine-drivers/machine v0.7.1-0.20200323212942-41eb826190d8 h1:CIddS19fAKG4rUkZAotX0WPQtx/v/SdLhhDU3MVhLy0= +github.com/machine-drivers/machine v0.7.1-0.20200323212942-41eb826190d8/go.mod h1:79Uwa2hGd5S39LDJt58s8JZcIhGEK6pkq9bsuTbFWbk= github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -489,6 +531,7 @@ 
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -500,9 +543,13 @@ github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -510,8 +557,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/medyagh/machine v0.16.4 h1:oEsH3C1TYzs5axakAI/K1yc5O3r6de0+mCGumX4aHwM= -github.com/medyagh/machine v0.16.4/go.mod h1:/HegrAvHvD0AGQYQaLfrmUqxQTQF3Ks9qkj34p/ZH40= github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -552,13 +597,17 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod 
h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= @@ -597,6 +646,9 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -615,6 +667,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= @@ -625,16 +678,20 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= @@ -647,6 +704,8 @@ github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvf github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 h1:tlXG832s5pa9x9Gs3Rp2rTvEqjiDEuETUOSfBEiTcns= +github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97 h1:DWY4yZN6w+FSKMeqBBXaalT8zmCn4DVwBGopShnlwFE= @@ -654,6 +713,7 @@ github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97/go.mod h1:mU github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= @@ -668,10 +728,14 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -680,6 +744,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= @@ -698,6 +764,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= +github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -710,6 +778,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= 
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -717,6 +787,8 @@ github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLY github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= @@ -747,6 +819,7 @@ github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSf github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097 h1:Ucx5I1l1+TWXvqFmBigYu4Ub4MLvUuUU/whjoUvV95I= github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097/go.mod h1:lFZSWRIpCfE/pt91hHBBpV6+x87YlCjsp+aIR2qCPPU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= @@ -757,6 +830,7 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -819,6 +893,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= @@ -877,6 +952,7 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -956,6 +1032,7 @@ google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -967,11 +1044,14 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -983,6 +1063,9 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNj gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod 
h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= @@ -1015,11 +1098,13 @@ honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0= k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= +k8s.io/apiextensions-apiserver v0.17.3 h1:WDZWkPcbgvchEdDd7ysL21GGPx3UKZQLDZXEkevT6n4= k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= k8s.io/apimachinery v0.17.3 h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg= k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apiserver v0.17.3 h1:faZbSuFtJ4dx09vctKZGHms/7bp3qFtbqb10Swswqfs= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= +k8s.io/cli-runtime v0.17.3 h1:0ZlDdJgJBKsu77trRUynNiWsRuAvAVPBNaQfnt/1qtc= k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU= k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= @@ -1054,6 +1139,7 @@ k8s.io/kubelet v0.17.3/go.mod h1:Nh8owUHZcUXtnDAtmGnip36Nw+X6c4rbmDQlVyIhwMQ= k8s.io/kubernetes v1.17.3 h1:zWCppkLfHM+hoLqfbsrQ0cJnYw+4vAvedI92oQnjo/Q= k8s.io/kubernetes v1.17.3/go.mod h1:gt28rfzaskIzJ8d82TSJmGrJ0XZD0BBy8TcQvTuCI3w= k8s.io/legacy-cloud-providers v0.17.3/go.mod h1:ujZML5v8efVQxiXXTG+nck7SjP8KhMRjUYNIsoSkYI0= +k8s.io/metrics v0.17.3 h1:IqXkNK+5E3vnobFD923Mn1QJEt3fb6+sK0wIjtBzOvw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/sample-apiserver v0.17.3/go.mod h1:cn/rvFIttGNqy1v88B5ZlDAbyyqDOoF7JHSwPiqNCNQ= @@ -1071,6 +1157,7 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIa mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible h1:qV3eFdgCp7Cp/ORjkJI9VBBEOntT+z385jLqdBtmgHA= sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.0+incompatible/go.mod h1:qhqLyNwJC49PoUalmtzYb4s9fT8HOMBTLbTY1QoVOqI= @@ -1079,4 +1166,5 @@ sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc h1:MksmcCZQWAQJCTA5T0jgI/0sJ51AVm4Z41MrmfczEoc= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/hack/images/kicbase.Dockerfile b/hack/images/kicbase.Dockerfile index 
baadf88df3..97a29b7371 100644 --- a/hack/images/kicbase.Dockerfile +++ b/hack/images/kicbase.Dockerfile @@ -2,7 +2,7 @@ ARG COMMIT_SHA # using base image created by kind https://github.com/kubernetes-sigs/kind/blob/master/images/base/Dockerfile # which is an ubuntu 19.10 with an entry-point that helps running systemd # could be changed to any debian that can run systemd -FROM kindest/base:v20200122-2dfe64b2 as base +FROM kindest/base:v20200317-92225082 as base USER root # specify version of everything explicitly using 'apt-cache policy' RUN apt-get update && apt-get install -y --no-install-recommends \ @@ -12,14 +12,16 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ docker.io=19.03.2-0ubuntu1 \ openssh-server=1:8.0p1-6build1 \ dnsutils=1:9.11.5.P4+dfsg-5.1ubuntu2.1 \ + # libglib2.0-0 is required for conmon, which is required for podman + libglib2.0-0=2.62.1-1 \ && rm /etc/crictl.yaml # install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128 RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_19.10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \ curl -LO https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_19.10/Release.key && \ apt-key add - < Release.key && apt-get update && \ - apt-get install -y --no-install-recommends cri-o-1.17=1.17.0-3 + apt-get install -y --no-install-recommends cri-o-1.17=1.17.2~1 # install podman -RUN apt-get install -y --no-install-recommends podman=1.8.0~7 +RUN apt-get install -y --no-install-recommends podman=1.8.2~144 # disable non-docker runtimes by default RUN systemctl disable containerd && systemctl disable crio && rm /etc/crictl.yaml # enable docker which is default diff --git a/hack/jenkins/common.sh b/hack/jenkins/common.sh index 404b7aa4f9..3035caae53 100755 --- a/hack/jenkins/common.sh +++ b/hack/jenkins/common.sh @@ -286,7 +286,6 @@ fi touch "${TEST_OUT}" ${SUDO_PREFIX}${E2E_BIN} \ -minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \ - -expected-default-driver="${EXPECTED_DEFAULT_DRIVER}" \ -test.timeout=70m -test.v \ ${EXTRA_TEST_ARGS} \ -binary="${MINIKUBE_BIN}" 2>&1 | tee "${TEST_OUT}" @@ -339,9 +338,9 @@ fi echo ">> Installing gopogh" if [ "$(uname)" != "Darwin" ]; then - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64 && sudo install gopogh-linux-amd64 /usr/local/bin/gopogh else - curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh + curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-darwin-amd64 && sudo install gopogh-darwin-amd64 /usr/local/bin/gopogh fi echo ">> Running gopogh" diff --git a/hack/jenkins/linux_integration_tests_none.sh b/hack/jenkins/linux_integration_tests_none.sh index a902ef15ee..0549fde508 100755 --- a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -49,7 +49,21 @@ sudo rm -rf /var/lib/minikube/* # Stop any leftover kubelet sudo systemctl is-active --quiet kubelet \ && echo "stopping kubelet" \ - && sudo systemctl stop kubelet + && sudo systemctl stop -f kubelet + + # conntrack is required for kubernetes 1.18 and higher for none driver +if 
! conntrack --version &>/dev/null; then + echo "WARNING: contrack is not installed. will try to install." + sudo apt-get update -qq + sudo apt-get -qq -y install conntrack +fi + + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon +if ! which socat &>/dev/null; then + echo "WARNING: socat is not installed. will try to install." + sudo apt-get update -qq + sudo apt-get -qq -y install socat +fi mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" diff --git a/hack/jenkins/preload_generation.sh b/hack/jenkins/preload_generation.sh new file mode 100644 index 0000000000..66e2ba1125 --- /dev/null +++ b/hack/jenkins/preload_generation.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script builds all the minikube binary for all 3 platforms as well as Windows-installer and .deb +# This is intended to be run on a new release tag in order to build/upload the required files for a release + + +set -eux -o pipefail + +# Make sure the right golang version is installed based on Makefile +WANT_GOLANG_VERSION=$(grep '^GO_VERSION' Makefile | awk '{ print $3 }') +./hack/jenkins/installers/check_install_golang.sh $WANT_GOLANG_VERSION /usr/local + +make upload-preloaded-images-tar +make clean diff --git a/hack/jenkins/release_update_brew.sh b/hack/jenkins/release_update_brew.sh index 1ea538c8d6..ee10433ec5 100755 --- a/hack/jenkins/release_update_brew.sh +++ b/hack/jenkins/release_update_brew.sh @@ -45,6 +45,9 @@ export HOMEBREW_GITHUB_API_TOKEN="${access_token}" # sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)" export PATH=/home/linuxbrew/.linuxbrew/bin:$PATH +# avoid "error: you need to resolve your current index first" message +cd "${SRC_DIR}" + brew bump-formula-pr \ --strict minikube \ --revision="${revision}" \ diff --git a/hack/jenkins/windows_integration_test_hyperv.ps1 b/hack/jenkins/windows_integration_test_hyperv.ps1 index f1c4db8b9d..536c4e35cc 100644 --- a/hack/jenkins/windows_integration_test_hyperv.ps1 +++ b/hack/jenkins/windows_integration_test_hyperv.ps1 @@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . 
./out/minikube-windows-amd64.exe delete -out/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/hack/jenkins/windows_integration_test_virtualbox.ps1 b/hack/jenkins/windows_integration_test_virtualbox.ps1 index 6d9c7f318b..2f5957301f 100644 --- a/hack/jenkins/windows_integration_test_virtualbox.ps1 +++ b/hack/jenkins/windows_integration_test_virtualbox.ps1 @@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata . ./out/minikube-windows-amd64.exe delete -out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -expected-default-driver=hyperv -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m +out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m $env:result=$lastexitcode # If the last exit code was 0->success, x>0->error If($env:result -eq 0){$env:status="success"} diff --git a/hack/kubernetes_version/update_kubernetes_version.go b/hack/kubernetes_version/update_kubernetes_version.go index 87466c6249..6bd5325c21 100644 --- a/hack/kubernetes_version/update_kubernetes_version.go +++ b/hack/kubernetes_version/update_kubernetes_version.go @@ -52,18 +52,18 @@ func main() { } mode := info.Mode() - re := regexp.MustCompile(`var DefaultKubernetesVersion = .*`) - f := re.ReplaceAllString(string(cf), "var DefaultKubernetesVersion = \""+v+"\"") + re := regexp.MustCompile(`DefaultKubernetesVersion = \".*`) + f := re.ReplaceAllString(string(cf), "DefaultKubernetesVersion = \""+v+"\"") - re = regexp.MustCompile(`var NewestKubernetesVersion = .*`) - f = re.ReplaceAllString(f, "var NewestKubernetesVersion = \""+v+"\"") + re = regexp.MustCompile(`NewestKubernetesVersion = \".*`) + f = re.ReplaceAllString(f, "NewestKubernetesVersion = \""+v+"\"") if err := ioutil.WriteFile(constantsFile, []byte(f), mode); err != nil { fmt.Println(err) os.Exit(1) } - testData := "../../pkg/minikube/bootstrapper/kubeadm/testdata" + testData := "../../pkg/minikube/bootstrapper/bsutil/testdata" err = filepath.Walk(testData, func(path string, info os.FileInfo, err error) error { if err != nil { diff --git a/hack/preload-images/generate.go b/hack/preload-images/generate.go new file mode 100644 index 0000000000..1a22e7404d --- /dev/null +++ b/hack/preload-images/generate.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/drivers/kic" + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/minikube/sysinit" +) + +func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error { + defer func() { + if err := deleteMinikube(); err != nil { + fmt.Println(err) + } + }() + + driver := kic.NewDriver(kic.Config{ + KubernetesVersion: kubernetesVersion, + ContainerRuntime: driver.Docker, + OCIBinary: oci.Docker, + MachineName: profile, + ImageDigest: kic.BaseImage, + StorePath: localpath.MiniPath(), + CPU: 2, + Memory: 4000, + APIServerPort: 8080, + }) + + baseDir := filepath.Dir(driver.GetSSHKeyPath()) + defer os.Remove(baseDir) + + if err := os.MkdirAll(baseDir, 0755); err != nil { + return errors.Wrap(err, "mkdir") + } + if err := driver.Create(); err != nil { + return errors.Wrap(err, "creating kic driver") + } + + // Now, get images to pull + imgs, err := images.Kubeadm("", kubernetesVersion) + if err != nil { + return errors.Wrap(err, "kubeadm images") + } + + if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428 + imgs = append(imgs, kic.OverlayImage) + } + + for _, img := range imgs { + cmd := exec.Command("docker", "exec", profile, "docker", "pull", img) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "downloading %s", img) + } + } + + // Transfer in k8s binaries + kcfg := config.KubernetesConfig{ + KubernetesVersion: kubernetesVersion, + } + runner := command.NewKICRunner(profile, driver.OCIBinary) + sm := sysinit.New(runner) + + if err := bsutil.TransferBinaries(kcfg, runner, sm); err != nil { + return errors.Wrap(err, "transferring k8s binaries") + } + // Create image tarball + if err := createImageTarball(tarballFilename); err != nil { + return errors.Wrap(err, "create tarball") + } + return copyTarballToHost(tarballFilename) +} + +func createImageTarball(tarballFilename string) error { + // directories to save into tarball + dirs := []string{ + fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), + "./lib/docker/image", + "./lib/minikube/binaries", + } + args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename} + args = append(args, dirs...) + cmd := exec.Command("docker", args...) 
+ cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "tarball cmd: %s", cmd.Args) + } + return nil +} + +func copyTarballToHost(tarballFilename string) error { + dest := filepath.Join("out/", tarballFilename) + cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest) + cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "cp cmd: %s", cmd.Args) + } + return nil +} + +func deleteMinikube() error { + cmd := exec.Command(minikubePath, "delete", "-p", profile) + cmd.Stdout = os.Stdout + return cmd.Run() +} diff --git a/hack/preload-images/kubernetes.go b/hack/preload-images/kubernetes.go new file mode 100644 index 0000000000..1114d39e12 --- /dev/null +++ b/hack/preload-images/kubernetes.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + + "github.com/golang/glog" + "github.com/google/go-github/github" +) + +// RecentK8sVersions returns the most recent k8s version, usually around 30 +func RecentK8sVersions() ([]string, error) { + client := github.NewClient(nil) + k8s := "kubernetes" + list, _, err := client.Repositories.ListReleases(context.Background(), k8s, k8s, &github.ListOptions{}) + if err != nil { + return nil, err + } + var releases []string + for _, r := range list { + releases = append(releases, r.GetTagName()) + } + glog.Infof("Got releases: %v", releases) + return releases, nil +} diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 1c6c858331..37bbd678e7 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -20,20 +20,12 @@ import ( "bytes" "flag" "fmt" - "os" "os/exec" - "path/filepath" "strings" - "github.com/pkg/errors" - "k8s.io/minikube/pkg/drivers/kic" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/localpath" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/exit" ) const ( @@ -42,123 +34,49 @@ const ( ) var ( - kubernetesVersion = "" - tarballFilename = "" - dockerStorageDriver = "" - preloadedTarballVersion = "" - containerRuntime = "" + dockerStorageDriver = "overlay2" + containerRuntimes = []string{"docker"} + k8sVersion string + k8sVersions []string ) func init() { - flag.StringVar(&kubernetesVersion, "kubernetes-version", "", "desired kubernetes version, for example `v1.17.2`") - flag.StringVar(&dockerStorageDriver, "docker-storage-driver", "overlay2", "docker storage driver backend") - flag.StringVar(&preloadedTarballVersion, "preloaded-tarball-version", "", "preloaded tarball version") - flag.StringVar(&containerRuntime, "container-runtime", "docker", "container runtime") - + 
flag.StringVar(&k8sVersion, "kubernetes-version", "", "desired kubernetes version, for example `v1.17.2`") flag.Parse() - tarballFilename = fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-%s.tar.lz4", preloadedTarballVersion, kubernetesVersion, containerRuntime, dockerStorageDriver) + if k8sVersion != "" { + k8sVersions = append(k8sVersions, k8sVersion) + } + viper.Set("preload", "true") } func main() { if err := verifyDockerStorage(); err != nil { - fmt.Println(err) - os.Exit(1) + exit.WithError("Docker storage type is incompatible: %v\n", err) } - if err := executePreloadImages(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func executePreloadImages() error { - defer func() { - if err := deleteMinikube(); err != nil { - fmt.Println(err) - } - }() - - driver := kic.NewDriver(kic.Config{ - KubernetesVersion: kubernetesVersion, - ContainerRuntime: driver.Docker, - OCIBinary: oci.Docker, - MachineName: profile, - ImageDigest: kic.BaseImage, - StorePath: localpath.MiniPath(), - CPU: 2, - Memory: 4000, - APIServerPort: 8080, - }) - - baseDir := filepath.Dir(driver.GetSSHKeyPath()) - defer os.Remove(baseDir) - - if err := os.MkdirAll(baseDir, 0755); err != nil { - return errors.Wrap(err, "mkdir") - } - if err := driver.Create(); err != nil { - return errors.Wrap(err, "creating kic driver") - } - - // Now, get images to pull - imgs, err := images.Kubeadm("", kubernetesVersion) - if err != nil { - return errors.Wrap(err, "kubeadm images") - } - - for _, img := range append(imgs, kic.OverlayImage) { - cmd := exec.Command("docker", "exec", profile, "docker", "pull", img) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "downloading %s", img) + if k8sVersions == nil { + var err error + k8sVersions, err = RecentK8sVersions() + if err != nil { + exit.WithError("Unable to get recent k8s versions: %v\n", err) } } - // Transfer in k8s binaries - kcfg := config.KubernetesConfig{ - KubernetesVersion: kubernetesVersion, + for _, kv := range k8sVersions { + for _, cr := range containerRuntimes { + tf := download.TarballName(kv, cr) + if download.PreloadExists(kv, cr) { + fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) + continue + } + fmt.Printf("A preloaded tarball for k8s version %s doesn't exist, generating now...\n", kv) + if err := generateTarball(kv, cr, tf); err != nil { + exit.WithError(fmt.Sprintf("generating tarball for k8s version %s with %s", kv, cr), err) + } + if err := uploadTarball(tf); err != nil { + exit.WithError(fmt.Sprintf("uploading tarball for k8s version %s with %s", kv, cr), err) + } + } } - runner := command.NewKICRunner(profile, driver.OCIBinary) - if err := bsutil.TransferBinaries(kcfg, runner); err != nil { - return errors.Wrap(err, "transferring k8s binaries") - } - // Create image tarball - if err := createImageTarball(); err != nil { - return errors.Wrap(err, "create tarball") - } - return copyTarballToHost() -} - -func createImageTarball() error { - dirs := []string{ - fmt.Sprintf("./lib/docker/%s", dockerStorageDriver), - "./lib/docker/image", - "./lib/minikube/binaries", - } - args := []string{"exec", profile, "sudo", "tar", "-I", "lz4", "-C", "/var", "-cvf", tarballFilename} - args = append(args, dirs...) - cmd := exec.Command("docker", args...) 
- cmd.Stdout = os.Stdout - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "tarball cmd: %s", cmd.Args) - } - return nil -} - -func copyTarballToHost() error { - dest := filepath.Join("out/", tarballFilename) - cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest) - cmd.Stdout = os.Stdout - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "cp cmd: %s", cmd.Args) - } - return nil -} - -func deleteMinikube() error { - cmd := exec.Command(minikubePath, "delete", "-p", profile) - cmd.Stdout = os.Stdout - return cmd.Run() } func verifyDockerStorage() error { diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go new file mode 100644 index 0000000000..89bbd8004b --- /dev/null +++ b/hack/preload-images/upload.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "os/exec" + "path" + + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/download" +) + +func uploadTarball(tarballFilename string) error { + // Upload tarball to GCS + hostPath := path.Join("out/", tarballFilename) + gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) + cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) + fmt.Printf("Running: %v\n", cmd.Args) + if output, err := cmd.CombinedOutput(); err != nil { + return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) + } + // Make tarball public to all users + gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) + cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) + fmt.Printf("Running: %v\n", cmd.Args) + if output, err := cmd.CombinedOutput(); err != nil { + fmt.Printf(`Failed to update ACLs on this tarball in GCS. Please run + +gsutil acl ch -u AllUsers:R %s + +manually to make this link public, or rerun this script to rebuild and reupload the tarball. + + `, gcsPath) + return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) + } + return nil +} diff --git a/netlify.toml b/netlify.toml index d335d812be..bbf6fcf397 100644 --- a/netlify.toml +++ b/netlify.toml @@ -4,7 +4,7 @@ publish = "site/public/" command = "pwd && cd themes/docsy && git submodule update -f --init && cd ../.. 
&& hugo" [build.environment] -HUGO_VERSION = "0.59.0" +HUGO_VERSION = "0.68.3" [context.production.environment] HUGO_ENV = "production" diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index d644aafc88..b39ee3f601 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -19,9 +19,11 @@ package addons import ( "fmt" "path" + "runtime" "sort" "strconv" "strings" + "sync" "time" "github.com/golang/glog" @@ -34,37 +36,55 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/storageclass" + "k8s.io/minikube/pkg/util/retry" ) // defaultStorageClassProvisioner is the name of the default storage class provisioner const defaultStorageClassProvisioner = "standard" -// Set sets a value -func Set(name, value, profile string) error { - glog.Infof("Setting %s=%s in profile %q", name, value, profile) +// RunCallbacks runs all actions associated to an addon, but does not set it (thread-safe) +func RunCallbacks(cc *config.ClusterConfig, name string, value string) error { + glog.Infof("Setting %s=%s in profile %q", name, value, cc.Name) a, valid := isAddonValid(name) if !valid { return errors.Errorf("%s is not a valid addon", name) } - cc, err := config.Load(profile) - if err != nil { - return errors.Wrap(err, "loading profile") - } - // Run any additional validations for this property if err := run(cc, name, value, a.validations); err != nil { return errors.Wrap(err, "running validations") } - if err := a.set(cc, name, value); err != nil { - return errors.Wrap(err, "setting new value of addon") - } - // Run any callbacks for this property if err := run(cc, name, value, a.callbacks); err != nil { return errors.Wrap(err, "running callbacks") } + return nil +} + +// Set sets a value in the config (not threadsafe) +func Set(cc *config.ClusterConfig, name string, value string) error { + a, valid := isAddonValid(name) + if !valid { + return errors.Errorf("%s is not a valid addon", name) + } + return a.set(cc, name, value) +} + +// SetAndSave sets a value and saves the config +func SetAndSave(profile string, name string, value string) error { + cc, err := config.Load(profile) + if err != nil { + return errors.Wrap(err, "loading profile") + } + + if err := RunCallbacks(cc, name, value); err != nil { + return errors.Wrap(err, "run callbacks") + } + + if err := Set(cc, name, value); err != nil { + return errors.Wrap(err, "set") + } glog.Infof("Writing out %q config to set %s=%v...", profile, name, value) return config.Write(profile, cc) @@ -85,7 +105,7 @@ func run(cc *config.ClusterConfig, name string, value string, fns []setFn) error return nil } -// SetBool sets a bool value +// SetBool sets a bool value in the config (not threadsafe) func SetBool(cc *config.ClusterConfig, name string, val string) error { b, err := strconv.ParseBool(val) if err != nil { @@ -108,19 +128,24 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err addon := assets.Addons[name] // check addon status before enabling/disabling it - alreadySet, err := isAddonAlreadySet(addon, enable, cc.Name) - if err != nil { - out.ErrT(out.Conflict, "{{.error}}", out.V{"error": err}) - return err - } - - if alreadySet { + if isAddonAlreadySet(cc, addon, enable) { glog.Warningf("addon %s should already be in state %v", name, val) if !enable { return nil } } + // to match both ingress and ingress-dns adons + if strings.HasPrefix(name, "ingress") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS != "linux" { + exit.UsageT(`Due to {{.driver_name}} 
networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver. +Alternatively to use this addon you can use a vm-based driver: + + 'minikube start --vm=true' + +To track the update on this work in progress feature please check: +https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Driver, "os_name": runtime.GOOS, "addon_name": name}) + } + if strings.HasPrefix(name, "istio") && enable { minMem := 8192 minCPUs := 4 @@ -147,7 +172,7 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err mName := driver.MachineName(*cc, cp) host, err := machine.LoadHost(api, mName) if err != nil || !machine.IsRunning(api, mName) { - glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", mName, addon.Name(), enable, err) + glog.Warningf("%q is not running, setting %s=%v and skipping enablement (err=%v)", mName, addon.Name(), enable, err) return nil } @@ -160,19 +185,17 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err return enableOrDisableAddonInternal(cc, addon, cmd, data, enable) } -func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) { - addonStatus, err := addon.IsEnabled(profile) - if err != nil { - return false, errors.Wrap(err, "is enabled") +func isAddonAlreadySet(cc *config.ClusterConfig, addon *assets.Addon, enable bool) bool { + enabled := addon.IsEnabled(cc) + if enabled && enable { + return true } - if addonStatus && enable { - return true, nil - } else if !addonStatus && !enable { - return true, nil + if !enabled && !enable { + return true } - return false, nil + return false } func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error { @@ -184,7 +207,7 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, if addon.IsTemplate() { f, err = addon.Evaluate(data) if err != nil { - return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetAssetName()) + return errors.Wrapf(err, "evaluate bundled addon %s asset", addon.GetSourcePath()) } } else { @@ -211,13 +234,17 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, } command := kubectlCommand(cc, deployFiles, enable) - glog.Infof("Running: %v", command) - rr, err := cmd.RunCmd(command) - if err != nil { - return errors.Wrapf(err, "addon apply") + + // Retry, because sometimes we race against an apiserver restart + apply := func() error { + _, err := cmd.RunCmd(command) + if err != nil { + glog.Warningf("apply failed, will retry: %v", err) + } + return err } - glog.Infof("output:\n%s", rr.Output()) - return nil + + return retry.Expo(apply, 1*time.Second, time.Second*30) } // enableOrDisableStorageClasses enables or disables storage classes @@ -270,7 +297,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st } // Start enables the default addons for a profile, plus any additional -func Start(profile string, toEnable map[string]bool, additional []string) { +func Start(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]bool, additional []string) { + wg.Add(1) + defer wg.Done() + start := time.Now() glog.Infof("enableAddons start: toEnable=%v, additional=%s", toEnable, additional) defer func() { @@ -279,11 +309,7 @@ func Start(profile string, toEnable map[string]bool, additional []string) { // Get the default values of any addons not saved to our config for name, a := 
range assets.Addons { - defaultVal, err := a.IsEnabled(profile) - if err != nil { - glog.Errorf("is-enabled failed for %q: %v", a.Name(), err) - continue - } + defaultVal := a.IsEnabled(cc) _, exists := toEnable[name] if !exists { @@ -304,12 +330,25 @@ func Start(profile string, toEnable map[string]bool, additional []string) { } sort.Strings(toEnableList) + var awg sync.WaitGroup + out.T(out.AddonEnable, "Enabling addons: {{.addons}}", out.V{"addons": strings.Join(toEnableList, ", ")}) for _, a := range toEnableList { - err := Set(a, "true", profile) - if err != nil { - // Intentionally non-fatal - out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": a, "error": err}) + awg.Add(1) + go func(name string) { + err := RunCallbacks(cc, name, "true") + if err != nil { + out.WarningT("Enabling '{{.name}}' returned an error: {{.error}}", out.V{"name": name, "error": err}) + } + awg.Done() + }(a) + } + + // Wait until all of the addons are enabled before updating the config (not thread safe) + awg.Wait() + for _, a := range toEnableList { + if err := Set(cc, a, "true"); err != nil { + glog.Errorf("store failed: %v", err) } } } diff --git a/pkg/addons/addons_test.go b/pkg/addons/addons_test.go index 6862aaf542..004d0f5047 100644 --- a/pkg/addons/addons_test.go +++ b/pkg/addons/addons_test.go @@ -20,6 +20,7 @@ import ( "io/ioutil" "os" "path/filepath" + "sync" "testing" "k8s.io/minikube/pkg/minikube/assets" @@ -59,47 +60,42 @@ func createTestProfile(t *testing.T) string { } func TestIsAddonAlreadySet(t *testing.T) { - profile := createTestProfile(t) - if err := Set("registry", "true", profile); err != nil { + cc := &config.ClusterConfig{Name: "test"} + + if err := Set(cc, "registry", "true"); err != nil { t.Errorf("unable to set registry true: %v", err) } - enabled, err := assets.Addons["registry"].IsEnabled(profile) - if err != nil { - t.Errorf("registry: %v", err) - } - if !enabled { + if !assets.Addons["registry"].IsEnabled(cc) { t.Errorf("expected registry to be enabled") } - enabled, err = assets.Addons["ingress"].IsEnabled(profile) - if err != nil { - t.Errorf("ingress: %v", err) - } - if enabled { + if assets.Addons["ingress"].IsEnabled(cc) { t.Errorf("expected ingress to not be enabled") } } func TestDisableUnknownAddon(t *testing.T) { - profile := createTestProfile(t) - if err := Set("InvalidAddon", "false", profile); err == nil { + cc := &config.ClusterConfig{Name: "test"} + + if err := Set(cc, "InvalidAddon", "false"); err == nil { t.Fatalf("Disable did not return error for unknown addon") } } func TestEnableUnknownAddon(t *testing.T) { - profile := createTestProfile(t) - if err := Set("InvalidAddon", "true", profile); err == nil { + cc := &config.ClusterConfig{Name: "test"} + + if err := Set(cc, "InvalidAddon", "true"); err == nil { t.Fatalf("Enable did not return error for unknown addon") } } -func TestEnableAndDisableAddon(t *testing.T) { +func TestSetAndSave(t *testing.T) { profile := createTestProfile(t) // enable - if err := Set("dashboard", "true", profile); err != nil { + if err := SetAndSave(profile, "dashboard", "true"); err != nil { t.Errorf("Disable returned unexpected error: " + err.Error()) } @@ -112,7 +108,7 @@ func TestEnableAndDisableAddon(t *testing.T) { } // disable - if err := Set("dashboard", "false", profile); err != nil { + if err := SetAndSave(profile, "dashboard", "false"); err != nil { t.Errorf("Disable returned unexpected error: " + err.Error()) } @@ -126,14 +122,18 @@ func TestEnableAndDisableAddon(t *testing.T) { } func TestStart(t 
*testing.T) { - profile := createTestProfile(t) - Start(profile, map[string]bool{}, []string{"dashboard"}) - - enabled, err := assets.Addons["dashboard"].IsEnabled(profile) - if err != nil { - t.Errorf("dashboard: %v", err) + cc := &config.ClusterConfig{ + Name: "start", + CPUs: 2, + Memory: 2500, + KubernetesConfig: config.KubernetesConfig{}, } - if !enabled { + + var wg sync.WaitGroup + Start(&wg, cc, map[string]bool{}, []string{"dashboard"}) + wg.Wait() + + if !assets.Addons["dashboard"].IsEnabled(cc) { t.Errorf("expected dashboard to be enabled") } } diff --git a/pkg/addons/config.go b/pkg/addons/config.go index c354d54041..4a39951f0d 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -112,6 +112,13 @@ var Addons = []*Addon{ set: SetBool, callbacks: []setFn{enableOrDisableAddon}, }, + { + name: "registry-aliases", + set: SetBool, + callbacks: []setFn{enableOrDisableAddon}, + //TODO - add other settings + //TODO check if registry addon is enabled + }, { name: "storage-provisioner", set: SetBool, diff --git a/pkg/drivers/hyperkit/iso_test.go b/pkg/drivers/hyperkit/iso_test.go new file mode 100644 index 0000000000..8dda1e4a6e --- /dev/null +++ b/pkg/drivers/hyperkit/iso_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hyperkit + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestExtractFile(t *testing.T) { + testDir, err := ioutil.TempDir(os.TempDir(), "") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.RemoveAll(testDir) + + tests := []struct { + name string + isoPath string + srcPath string + destPath string + expectedError bool + }{ + { + name: "all is right", + isoPath: "iso_test.iso", + srcPath: "/test1.txt", + destPath: testDir + "/test1.txt", + expectedError: false, + }, + { + name: "isoPath is invalid", + isoPath: "tests.iso", + srcPath: "/test1.txt", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "srcPath is empty", + isoPath: "iso_tests.iso", + srcPath: "", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "srcPath is invalid", + isoPath: "iso_tests.iso", + srcPath: "/t1.txt", + destPath: testDir + "/test1.txt", + expectedError: true, + }, + { + name: "destPath is empty", + isoPath: "iso_test.iso", + srcPath: "/test1.txt", + destPath: "", + expectedError: true, + }, + { + name: "find files in a folder", + isoPath: "./iso_test.iso", + srcPath: "/test2/test2.txt", + destPath: testDir + "/test2.txt", + expectedError: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ExtractFile(tt.isoPath, tt.srcPath, tt.destPath) + if (err != nil) != tt.expectedError { + t.Errorf("expectedError = %v, got = %v", tt.expectedError, err) + return + } + }) + } +} diff --git a/pkg/drivers/hyperkit/iso_test.iso b/pkg/drivers/hyperkit/iso_test.iso new file mode 100644 index 0000000000..dbed69dd41 Binary files /dev/null and b/pkg/drivers/hyperkit/iso_test.iso differ diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 2488ca8e86..0378c78626 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -17,12 +17,12 @@ limitations under the License.
package kic import ( - "context" "fmt" "net" "os/exec" "strconv" "strings" + "sync" "time" "github.com/docker/machine/libmachine/drivers" @@ -36,7 +36,9 @@ import ( "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/download" + "k8s.io/minikube/pkg/minikube/sysinit" ) // Driver represents a kic driver https://minikube.sigs.k8s.io/docs/reference/drivers/docker @@ -111,6 +113,28 @@ func (d *Driver) Create() error { } } + if err := oci.PrepareContainerNode(params); err != nil { + return errors.Wrap(err, "setting up container node") + } + + var waitForPreload sync.WaitGroup + waitForPreload.Add(1) + go func() { + defer waitForPreload.Done() + // If preload doesn't exist, don't bother extracting tarball to volume + if !download.PreloadExists(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime) { + return + } + t := time.Now() + glog.Infof("Starting extracting preloaded images to volume") + // Extract preloaded images to container + if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion, d.NodeConfig.ContainerRuntime), params.Name, BaseImage); err != nil { + glog.Infof("Unable to extract preloaded tarball to volume: %v", err) + } else { + glog.Infof("duration metric: took %f seconds to extract preloaded images to volume", time.Since(t).Seconds()) + } + }() + if err := oci.CreateContainerNode(params); err != nil { return errors.Wrap(err, "create kic node") } @@ -119,15 +143,7 @@ func (d *Driver) Create() error { return errors.Wrap(err, "prepare kic ssh") } - t := time.Now() - glog.Infof("Starting extracting preloaded images to volume") - // Extract preloaded images to container - if err := oci.ExtractTarballToVolume(download.TarballPath(d.NodeConfig.KubernetesVersion), params.Name, BaseImage); err != nil { - glog.Infof("Unable to extract preloaded tarball to volume: %v", err) - } else { - glog.Infof("Took %f seconds to extract preloaded images to volume", time.Since(t).Seconds()) - } - + waitForPreload.Wait() return nil } @@ -214,20 +230,12 @@ func (d *Driver) GetURL() (string, error) { // GetState returns the state that the host is in (running, stopped, etc) func (d *Driver) GetState() (state.State, error) { - // allow no more than 2 seconds for this. when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName) - out, err := cmd.CombinedOutput() - if ctx.Err() == context.DeadlineExceeded { - glog.Errorf("GetState for %s took longer than normal. Restarting your %s daemon might fix this issue.", d.MachineName, d.OCIBinary) - return state.Error, fmt.Errorf("inspect %s timeout", d.MachineName) - } - o := strings.TrimSpace(string(out)) + out, err := oci.WarnIfSlow(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName) if err != nil { - return state.Error, errors.Wrapf(err, "%s: %s", strings.Join(cmd.Args, " "), o) + return state.Error, err } + + o := strings.TrimSpace(string(out)) switch o { case "running": return state.Running, nil @@ -246,6 +254,11 @@ func (d *Driver) GetState() (state.State, error) { // Kill stops a host forcefully, including any containers that we are managing. 
func (d *Driver) Kill() error { + // d.exec is not set when this is called from the minikube CLI, so initialize it here + d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary) + if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) + } cmd := exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName) if err := cmd.Run(); err != nil { return errors.Wrapf(err, "killing kic node %s", d.MachineName) @@ -312,6 +325,42 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { + // d.exec is not set when this is called from the minikube CLI, so initialize it here + d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary) + // docker stop does not send the right signal for systemd to stop the kubelet service, + // so stop it explicitly to avoid its bind address still being taken on an upgrade. more info https://github.com/kubernetes/minikube/issues/7171 + if err := sysinit.New(d.exec).Stop("kubelet"); err != nil { + glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) + if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err) + } + } + + runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec}) + if err != nil { // won't return an error here: + // even if we can't stop the containers inside, we still want to stop the minikube container itself + glog.Errorf("unable to get container runtime: %v", err) + } else { + containers, err := runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces}) + if err != nil { + glog.Infof("unable to list containers: %v", err) + } + if len(containers) > 0 { + if err := runtime.StopContainers(containers); err != nil { + glog.Infof("unable to stop containers: %v", err) + } + if err := runtime.KillContainers(containers); err != nil { + glog.Errorf("unable to kill containers: %v", err) + } + } + glog.Infof("successfully stopped kubernetes!") + + } + + if err := killAPIServerProc(d.exec); err != nil { + glog.Warningf("couldn't stop kube-apiserver proc: %v", err) + } + cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName) if err := cmd.Run(); err != nil { return errors.Wrapf(err, "stopping %s", d.MachineName) @@ -323,3 +372,20 @@ func (d *Driver) Stop() error { func (d *Driver) RunSSHCommandFromDriver() error { return fmt.Errorf("driver does not support RunSSHCommandFromDriver commands") } + +// killAPIServerProc will kill an api server proc if it exists +// to ensure this never happens https://github.com/kubernetes/minikube/issues/7521 +func killAPIServerProc(runner command.Runner) error { + // first check if it exists + rr, err := runner.RunCmd(exec.Command("pgrep", "kube-apiserver")) + if err == nil { // this means we might have a running kube-apiserver + pid, err := strconv.Atoi(strings.TrimSpace(rr.Stdout.String())) + if err == nil { // this means we have a valid pid + glog.Warningf("Found a kube-apiserver running with pid %d, will try to kill the proc", pid) + if _, err = runner.RunCmd(exec.Command("kill", "-9", strconv.Itoa(pid))); err != nil { + return errors.Wrap(err, "kill") + } + } + } + return nil +} diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index df5117164f..f1e53843eb 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -29,6 +29,7 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/localpath" +
"k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/retry" "fmt" @@ -83,6 +84,19 @@ func DeleteContainer(ociBin string, name string) error { return nil } +// PrepareContainerNode sets up the container node before CreateContainerNode is called. +// For the docker runtime, it creates a docker volume which will be mounted into kic +func PrepareContainerNode(p CreateParams) error { + if p.OCIBinary != Docker { + return nil + } + if err := createDockerVolume(p.Name, p.Name); err != nil { + return errors.Wrapf(err, "creating volume for %s container", p.Name) + } + glog.Infof("Successfully created a docker volume %s", p.Name) + return nil +} + // CreateContainerNode creates a new container node func CreateContainerNode(p CreateParams) error { runArgs := []string{ @@ -121,10 +135,6 @@ func CreateContainerNode(p CreateParams) error { runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", hostVarVolPath)) } if p.OCIBinary == Docker { - if err := createDockerVolume(p.Name, p.Name); err != nil { - return errors.Wrapf(err, "creating volume for %s container", p.Name) - } - glog.Infof("Successfully created a docker volume %s", p.Name) runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name)) // setting resource limit in privileged mode is only supported by docker // podman error: "Error: invalid configuration, cannot set resources with rootless containers not using cgroups v2 unified mode" @@ -156,12 +166,13 @@ func CreateContainerNode(p CreateParams) error { if s != "running" { return fmt.Errorf("temporary error created container %q is not running yet", p.Name) } + glog.Infof("the created container %q has a running status.", p.Name) return nil } - // retry up to up 5 seconds to make sure the created container status is running. - if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*5); err != nil { - glog.Warningf("The created container %q failed to report to be running in 5 seconds.", p.Name) + // retry up to up 13 seconds to make sure the created container status is running. + if err := retry.Expo(checkRunning, 13*time.Millisecond, time.Second*13); err != nil { + return errors.Wrapf(err, "check container %q running", p.Name) } return nil @@ -231,19 +242,44 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) { return string(out), err } -// ContainerExists checks if container name exists (either running or exited) -func ContainerExists(ociBin string, name string) (bool, error) { - // allow no more than 3 seconds for this. - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() +// WarnIfSlow runs an oci command, warning about performance issues +func WarnIfSlow(args ...string) ([]byte, error) { + killTime := 19 * time.Second + warnTime := 2 * time.Second - cmd := exec.CommandContext(ctx, ociBin, "ps", "-a", "--format", "{{.Names}}") - out, err := cmd.CombinedOutput() - - if ctx.Err() == context.DeadlineExceeded { - return false, fmt.Errorf("time out running %s ps -a", ociBin) + if args[1] == "volume" || args[1] == "ps" { // volume and ps requires more time than inspect + killTime = 30 * time.Second + warnTime = 3 * time.Second } + ctx, cancel := context.WithTimeout(context.Background(), killTime) + defer cancel() + + start := time.Now() + glog.Infof("executing with %s timeout: %v", args, killTime) + cmd := exec.CommandContext(ctx, args[0], args[1:]...) 
+ stdout, err := cmd.Output() + d := time.Since(start) + if d > warnTime { + out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d}) + out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": args[0]}) + } + + if ctx.Err() == context.DeadlineExceeded { + return stdout, fmt.Errorf("%q timed out after %s", strings.Join(cmd.Args, " "), killTime) + } + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return stdout, fmt.Errorf("%q failed: %v: %s", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr) + } + return stdout, fmt.Errorf("%q failed: %v", strings.Join(cmd.Args, " "), err) + } + return stdout, nil +} + +// ContainerExists checks if container name exists (either running or exited) +func ContainerExists(ociBin string, name string) (bool, error) { + out, err := WarnIfSlow(ociBin, "ps", "-a", "--format", "{{.Names}}") if err != nil { return false, errors.Wrapf(err, string(out)) } @@ -409,12 +445,10 @@ func withPortMappings(portMappings []PortMapping) createOpt { // listContainersByLabel returns all the container names with a specified label func listContainersByLabel(ociBinary string, label string) ([]string, error) { - - // allow no more than 5 seconds for docker ps - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}") - stdout, err := cmd.Output() + stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}") + if err != nil { + return nil, err + } s := bufio.NewScanner(bytes.NewReader(stdout)) var names []string for s.Scan() { @@ -447,21 +481,6 @@ func PointToHostDockerDaemon() error { // ContainerStatus returns status of a container running,exited,... func ContainerStatus(ociBin string, name string) (string, error) { - // allow no more than 2 seconds for this. when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, ociBin, "inspect", name, "--format={{.State.Status}}") - out, err := cmd.CombinedOutput() - - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("%s inspect %s took longer than normal. Restarting your %s daemon might fix this issue.", ociBin, name, ociBin) - return strings.TrimSpace(string(out)), fmt.Errorf("inspect %s timeout", name) - } - - if err != nil { - return string(out), errors.Wrapf(err, "inspecting container: output %s", out) - } - - return strings.TrimSpace(string(out)), nil + out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}") + return strings.TrimSpace(string(out)), err } diff --git a/pkg/drivers/kic/oci/volumes.go b/pkg/drivers/kic/oci/volumes.go index 58c13b9621..2fbd9f32b0 100644 --- a/pkg/drivers/kic/oci/volumes.go +++ b/pkg/drivers/kic/oci/volumes.go @@ -19,11 +19,9 @@ package oci import ( "bufio" "bytes" - "context" "fmt" "os/exec" "strings" - "time" "github.com/golang/glog" "github.com/pkg/errors" @@ -42,16 +40,8 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error { } for _, v := range vs { - // allow no more than 3 seconds for this. 
when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, ociBin, "volume", "rm", "--force", v) - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("removing volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin) - deleteErrs = append(deleteErrs, fmt.Errorf("delete deadline exceeded for %s", label)) - } - if out, err := cmd.CombinedOutput(); err != nil { - deleteErrs = append(deleteErrs, fmt.Errorf("deleting volume %s: output: %s", v, string(out))) + if _, err := WarnIfSlow(ociBin, "volume", "rm", "--force", v); err != nil { + deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v)) } } @@ -65,19 +55,8 @@ func PruneAllVolumesByLabel(ociBin string, label string) []error { var deleteErrs []error glog.Infof("trying to prune all %s volumes with label %s", ociBin, label) - // allow no more than 3 seconds for this. when this takes long this means deadline passed - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - // try to prune afterwards just in case delete didn't go through - cmd := exec.CommandContext(ctx, ociBin, "volume", "prune", "-f", "--filter", "label="+label) - if out, err := cmd.CombinedOutput(); err != nil { - deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s: %s", label, string(out))) - } - - if ctx.Err() == context.DeadlineExceeded { - glog.Warningf("pruning volume with label %s took longer than normal. Restarting your %s daemon might fix this issue.", label, ociBin) - deleteErrs = append(deleteErrs, fmt.Errorf("prune deadline exceeded for %s", label)) + if _, err := WarnIfSlow(ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil { + deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label)) } return deleteErrs diff --git a/pkg/drivers/kic/types.go b/pkg/drivers/kic/types.go index 32e2950396..2b01019b2e 100644 --- a/pkg/drivers/kic/types.go +++ b/pkg/drivers/kic/types.go @@ -30,13 +30,12 @@ const ( DefaultPodCIDR = "10.244.0.0/16" // Version is the current version of kic - Version = "v0.0.7" + Version = "v0.0.9" // SHA of the kic base image - baseImageSHA = "a6f288de0e5863cdeab711fa6bafa38ee7d8d285ca14216ecf84fcfb07c7d176" - + baseImageSHA = "82a826cc03c3e59ead5969b8020ca138de98f366c1907293df91fc57205dbb53" // OverlayImage is the cni plugin used for overlay image, created by kind. // CNI plugin image used for kic drivers created by kind. - OverlayImage = "kindest/kindnetd:0.5.3" + OverlayImage = "kindest/kindnetd:0.5.4" ) var ( diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 2fd3565741..1049f713fe 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -18,10 +18,7 @@ package none import ( "fmt" - "net" "os/exec" - "strings" - "time" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/state" @@ -34,8 +31,8 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/kubeconfig" + "k8s.io/minikube/pkg/minikube/sysinit" "k8s.io/minikube/pkg/minikube/vmpath" - "k8s.io/minikube/pkg/util/retry" ) // cleanupPaths are paths to be removed by cleanup, and are used by both kubeadm and minikube. 
@@ -128,20 +125,14 @@ func (d *Driver) GetURL() (string, error) { // GetState returns the state that the host is in (running, stopped, etc) func (d *Driver) GetState() (state.State, error) { - glog.Infof("GetState called") - ip, err := d.GetIP() - if err != nil { - return state.Error, err - } - - port, err := kubeconfig.Port(d.BaseDriver.MachineName) + hostname, port, err := kubeconfig.Endpoint(d.BaseDriver.MachineName) if err != nil { glog.Warningf("unable to get port: %v", err) port = constants.APIServerPort } // Confusing logic, as libmachine.Stop will loop until the state == Stopped - ast, err := kverify.APIServerStatus(d.exec, net.ParseIP(ip), port) + ast, err := kverify.APIServerStatus(d.exec, hostname, port) if err != nil { return ast, err } @@ -151,13 +142,13 @@ func (d *Driver) GetState() (state.State, error) { return state.Running, nil } - return kverify.KubeletStatus(d.exec) + return kverify.KubeletStatus(d.exec), nil } // Kill stops a host forcefully, including any containers that we are managing. func (d *Driver) Kill() error { - if err := stopKubelet(d.exec); err != nil { - return errors.Wrap(err, "kubelet") + if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err) } // First try to gracefully stop containers @@ -220,8 +211,11 @@ func (d *Driver) Start() error { // Stop a host gracefully, including any containers that we are managing. func (d *Driver) Stop() error { - if err := stopKubelet(d.exec); err != nil { - return errors.Wrap(err, "stop kubelet") + if err := sysinit.New(d.exec).Stop("kubelet"); err != nil { + glog.Warningf("couldn't stop kubelet. will continue with stop anyways: %v", err) + if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil { + glog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err) + } } containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { @@ -241,32 +235,6 @@ func (d *Driver) RunSSHCommandFromDriver() error { return fmt.Errorf("driver does not support ssh commands") } -// stopKubelet idempotently stops the kubelet -func stopKubelet(cr command.Runner) error { - glog.Infof("stopping kubelet.service ...") - stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") - if rr, err := cr.RunCmd(cmd); err != nil { - glog.Errorf("temporary error for %q : %v", rr.Command(), err) - } - cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") - rr, err := cr.RunCmd(cmd) - if err != nil { - glog.Errorf("temporary error: for %q : %v", rr.Command(), err) - } - if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { - return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) - } - return nil - } - - if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { - return errors.Wrapf(err, "error stopping kubelet") - } - - return nil -} - // restartKubelet restarts the kubelet func restartKubelet(cr command.Runner) error { glog.Infof("restarting kubelet.service ...") diff --git a/pkg/generate/docs.go b/pkg/generate/docs.go new file mode 100644 index 0000000000..895362214b --- /dev/null +++ b/pkg/generate/docs.go @@ -0,0 +1,138 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generate + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" + "k8s.io/minikube/pkg/minikube/out" +) + +// Docs generates docs for minikube command +func Docs(root *cobra.Command, path string) error { + cmds := root.Commands() + for _, c := range cmds { + if c.Hidden { + glog.Infof("Skipping generating doc for %s as it's a hidden command", c.Name()) + continue + } + contents, err := DocForCommand(c) + if err != nil { + return errors.Wrapf(err, "generating doc for %s", c.Name()) + } + if err := saveDocForCommand(c, []byte(contents), path); err != nil { + return errors.Wrapf(err, "saving doc for %s", c.Name()) + } + } + return nil +} + +// DocForCommand returns the specific doc for that command +func DocForCommand(command *cobra.Command) (string, error) { + buf := bytes.NewBuffer([]byte{}) + if err := generateTitle(command, buf); err != nil { + return "", errors.Wrap(err, "generating title") + } + if err := rewriteFlags(command); err != nil { + return "", errors.Wrap(err, "rewriting flags") + } + if err := writeSubcommands(command, buf); err != nil { + return "", errors.Wrap(err, "writing subcommands") + } + return removeHelpText(buf), nil +} + +// after every command, cobra automatically appends +// ### SEE ALSO + +// * [minikube addons](minikube_addons.md) - Modify minikube's kubernetes addons + +// ###### Auto generated by spf13/cobra on 1-Apr-2020 +// help text which is unnecessary info after every subcommand +// This function removes that text. 
+func removeHelpText(buffer *bytes.Buffer) string { + beginningHelpText := "### SEE ALSO" + endHelpText := "###### Auto generated by spf13/cobra" + scanner := bufio.NewScanner(buffer) + includeLine := true + + final := bytes.NewBuffer([]byte{}) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, beginningHelpText) { + includeLine = false + continue + } + if strings.Contains(line, endHelpText) { + includeLine = true + continue + } + if !includeLine { + continue + } + // scanner strips the ending newline + if _, err := final.WriteString(line + "\n"); err != nil { + glog.Warningf("error removing help text: %v", err) + break + } + } + return final.String() +} + +// writeSubcommands recursively appends all subcommands to the doc +func writeSubcommands(command *cobra.Command, w io.Writer) error { + if err := doc.GenMarkdown(command, w); err != nil { + return errors.Wrapf(err, "getting markdown custom") + } + if !command.HasSubCommands() { + return nil + } + subCommands := command.Commands() + for _, sc := range subCommands { + if err := writeSubcommands(sc, w); err != nil { + return err + } + } + return nil +} + +func generateTitle(command *cobra.Command, w io.Writer) error { + date := time.Now().Format("2006-01-02") + title := out.ApplyTemplateFormatting(9999, false, title, out.V{"Command": command.Name(), "Description": command.Short, "Date": date}) + _, err := w.Write([]byte(title)) + return err +} + +func saveDocForCommand(command *cobra.Command, contents []byte, path string) error { + fp := filepath.Join(path, fmt.Sprintf("%s.md", command.Name())) + if err := os.Remove(fp); err != nil { + glog.Warningf("error removing %s", fp) + } + return ioutil.WriteFile(fp, contents, 0644) +} diff --git a/pkg/generate/docs_templates.go b/pkg/generate/docs_templates.go new file mode 100644 index 0000000000..f7f5048370 --- /dev/null +++ b/pkg/generate/docs_templates.go @@ -0,0 +1,26 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generate + +var title = `--- +title: "{{.Command}}" +description: > + {{.Description}} +--- + + +` diff --git a/pkg/generate/rewrite.go b/pkg/generate/rewrite.go new file mode 100644 index 0000000000..fa7ca9d425 --- /dev/null +++ b/pkg/generate/rewrite.go @@ -0,0 +1,57 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generate + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +type rewrite struct { + flag string + usage string + defaultVal string +} + +// rewriteFlags rewrites flags that are dependent on operating system +// for example, for `minikube start`, the usage of --driver +// outputs possible drivers for the operating system +func rewriteFlags(command *cobra.Command) error { + rewrites := map[string][]rewrite{ + "start": []rewrite{{ + flag: "driver", + usage: "Used to specify the driver to run kubernetes in. The list of available drivers depends on operating system.", + }, { + flag: "mount-string", + usage: "The argument to pass the minikube mount command on start.", + }}, + } + rws, ok := rewrites[command.Name()] + if !ok { + return nil + } + for _, r := range rws { + flag := command.Flag(r.flag) + if flag == nil { + return fmt.Errorf("--%s is not a valid flag for %s", r.flag, command.Name()) + } + flag.Usage = r.usage + flag.DefValue = r.defaultVal + } + return nil +} diff --git a/pkg/gvisor/enable.go b/pkg/gvisor/enable.go index 40f69b2fc9..27d0260406 100644 --- a/pkg/gvisor/enable.go +++ b/pkg/gvisor/enable.go @@ -181,7 +181,7 @@ func copyAssetToDest(targetName, dest string) error { log.Printf("%s asset path: %s", targetName, src) contents, err := ioutil.ReadFile(src) if err != nil { - return errors.Wrapf(err, "getting contents of %s", asset.GetAssetName()) + return errors.Wrapf(err, "getting contents of %s", asset.GetSourcePath()) } if _, err := os.Stat(dest); err == nil { if err := os.Remove(dest); err != nil { diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index b25446e8f6..6d8d3f9ef6 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -19,8 +19,6 @@ package assets import ( "runtime" - "github.com/golang/glog" - "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/vmpath" @@ -49,21 +47,14 @@ func (a *Addon) Name() string { } // IsEnabled checks if an Addon is enabled for the given profile -func (a *Addon) IsEnabled(profile string) (bool, error) { - c, err := config.Load(profile) - if err != nil { - return false, errors.Wrap(err, "load") - } - - // Is this addon explicitly listed in their configuration? 
- status, ok := c.Addons[a.Name()] - glog.V(1).Infof("IsEnabled %q = %v (listed in config=%v)", a.Name(), status, ok) +func (a *Addon) IsEnabled(cc *config.ClusterConfig) bool { + status, ok := cc.Addons[a.Name()] if ok { - return status, nil + return status } // Return the default unconfigured state of the addon - return a.enabled, nil + return a.enabled } // Addons is the list of addons @@ -246,6 +237,38 @@ var Addons = map[string]*Addon{ "0640", false), }, false, "registry-creds"), + "registry-aliases": NewAddon([]*BinAsset{ + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-sa.tmpl", + vmpath.GuestAddonsDir, + "registry-aliases-sa.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl", + vmpath.GuestAddonsDir, + "registry-aliases-sa-crb.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/registry-aliases-config.tmpl", + vmpath.GuestAddonsDir, + "registry-aliases-config.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/node-etc-hosts-update.tmpl", + vmpath.GuestAddonsDir, + "node-etc-hosts-update.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/registry-aliases/patch-coredns-job.tmpl", + vmpath.GuestAddonsDir, + "patch-coredns-job.yaml", + "0640", + false), + }, false, "registry-aliases"), "freshpod": NewAddon([]*BinAsset{ MustBinAsset( "deploy/addons/freshpod/freshpod-rc.yaml.tmpl", @@ -264,11 +287,11 @@ var Addons = map[string]*Addon{ }, false, "nvidia-driver-installer"), "nvidia-gpu-device-plugin": NewAddon([]*BinAsset{ MustBinAsset( - "deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl", + "deploy/addons/gpu/nvidia-gpu-device-plugin.yaml", vmpath.GuestAddonsDir, "nvidia-gpu-device-plugin.yaml", "0640", - true), + false), }, false, "nvidia-gpu-device-plugin"), "logviewer": NewAddon([]*BinAsset{ MustBinAsset( diff --git a/pkg/minikube/assets/vm_assets.go b/pkg/minikube/assets/vm_assets.go index c3f30a667d..3d4541d754 100644 --- a/pkg/minikube/assets/vm_assets.go +++ b/pkg/minikube/assets/vm_assets.go @@ -29,11 +29,15 @@ import ( "github.com/pkg/errors" ) +// MemorySource is the source name used for in-memory copies +const MemorySource = "memory" + // CopyableFile is something that can be copied type CopyableFile interface { io.Reader GetLength() int - GetAssetName() string + GetSourcePath() string + GetTargetDir() string GetTargetName() string GetPermissions() string @@ -43,15 +47,16 @@ type CopyableFile interface { // BaseAsset is the base asset class type BaseAsset struct { - AssetName string + SourcePath string TargetDir string TargetName string Permissions string + Source string } -// GetAssetName returns asset name -func (b *BaseAsset) GetAssetName() string { - return b.AssetName +// GetSourcePath returns asset name +func (b *BaseAsset) GetSourcePath() string { + return b.SourcePath } // GetTargetDir returns target dir @@ -88,30 +93,37 @@ func NewMemoryAssetTarget(d []byte, targetPath, permissions string) *MemoryAsset // NewFileAsset creates a new FileAsset func NewFileAsset(src, targetDir, targetName, permissions string) (*FileAsset, error) { glog.V(4).Infof("NewFileAsset: %s -> %s", src, path.Join(targetDir, targetName)) + f, err := os.Open(src) if err != nil { - return nil, errors.Wrapf(err, "Error opening file asset: %s", src) + return nil, errors.Wrap(err, "open") } + info, err := os.Stat(src) if err != nil { - return nil, errors.Wrapf(err, "Error getting info for %s", src) + return nil, errors.Wrapf(err, "stat") } - r := io.NewSectionReader(f, 
0, info.Size()) + + if info.Size() == 0 { + glog.Warningf("NewFileAsset: %s is an empty file!", src) + } + return &FileAsset{ BaseAsset: BaseAsset{ - AssetName: src, + SourcePath: src, TargetDir: targetDir, TargetName: targetName, Permissions: permissions, }, - reader: r, + reader: io.NewSectionReader(f, 0, info.Size()), }, nil } // GetLength returns the file length, or 0 (on error) func (f *FileAsset) GetLength() (flen int) { - fi, err := os.Stat(f.AssetName) + fi, err := os.Stat(f.SourcePath) if err != nil { + glog.Errorf("stat(%q) failed: %v", f.SourcePath, err) return 0 } return int(fi.Size()) @@ -119,8 +131,12 @@ func (f *FileAsset) GetLength() (flen int) { // GetModTime returns modification time of the file func (f *FileAsset) GetModTime() (time.Time, error) { - fi, err := os.Stat(f.AssetName) - return fi.ModTime(), err + fi, err := os.Stat(f.SourcePath) + if err != nil { + glog.Errorf("stat(%q) failed: %v", f.SourcePath, err) + return time.Time{}, err + } + return fi.ModTime(), nil } // Read reads the asset @@ -165,6 +181,7 @@ func NewMemoryAsset(d []byte, targetDir, targetName, permissions string) *Memory TargetDir: targetDir, TargetName: targetName, Permissions: permissions, + SourcePath: MemorySource, }, reader: bytes.NewReader(d), length: len(d), @@ -192,7 +209,7 @@ func MustBinAsset(name, targetDir, targetName, permissions string, isTemplate bo func NewBinAsset(name, targetDir, targetName, permissions string, isTemplate bool) (*BinAsset, error) { m := &BinAsset{ BaseAsset: BaseAsset{ - AssetName: name, + SourcePath: name, TargetDir: targetDir, TargetName: targetName, Permissions: permissions, @@ -215,13 +232,13 @@ func defaultValue(defValue string, val interface{}) string { } func (m *BinAsset) loadData(isTemplate bool) error { - contents, err := Asset(m.AssetName) + contents, err := Asset(m.SourcePath) if err != nil { return err } if isTemplate { - tpl, err := template.New(m.AssetName).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents)) + tpl, err := template.New(m.SourcePath).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents)) if err != nil { return err } @@ -231,9 +248,9 @@ func (m *BinAsset) loadData(isTemplate bool) error { m.length = len(contents) m.reader = bytes.NewReader(contents) - glog.V(1).Infof("Created asset %s with %d bytes", m.AssetName, m.length) + glog.V(1).Infof("Created asset %s with %d bytes", m.SourcePath, m.length) if m.length == 0 { - return fmt.Errorf("%s is an empty asset", m.AssetName) + return fmt.Errorf("%s is an empty asset", m.SourcePath) } return nil } @@ -246,7 +263,7 @@ func (m *BinAsset) IsTemplate() bool { // Evaluate evaluates the template to a new asset func (m *BinAsset) Evaluate(data interface{}) (*MemoryAsset, error) { if !m.IsTemplate() { - return nil, errors.Errorf("the asset %s is not a template", m.AssetName) + return nil, errors.Errorf("the asset %s is not a template", m.SourcePath) } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 712446285d..0019b1750d 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -17,12 +17,12 @@ limitations under the License. 
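
[Editor's note, not part of the patch] The `vm_assets.go` changes above rename `GetAssetName` to `GetSourcePath` and give in-memory assets the synthetic source `MemorySource`. A small sketch of the resulting API, assuming the `assets` package as patched here; the payload and target path are only examples.

```go
package main

import (
	"fmt"

	"k8s.io/minikube/pkg/minikube/assets"
)

func main() {
	// An in-memory asset now reports a synthetic source path ("memory")
	// instead of pretending to be backed by a file on disk.
	a := assets.NewMemoryAsset([]byte("apiVersion: v1\nkind: ConfigMap\n"),
		"/etc/kubernetes/addons", "example.yaml", "0640")

	fmt.Println(a.GetSourcePath() == assets.MemorySource) // true
	fmt.Println(a.GetTargetDir(), a.GetTargetName())      // where it will be copied on the guest
	fmt.Println(a.GetLength())                            // length of the in-memory payload
}
```
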
package bootstrapper import ( - "net" "time" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" ) // LogOptions are options to be passed to LogCommands @@ -38,12 +38,14 @@ type Bootstrapper interface { StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error - WaitForCluster(config.ClusterConfig, time.Duration) error + WaitForNode(config.ClusterConfig, config.Node, time.Duration) error + JoinCluster(config.ClusterConfig, config.Node, string) error + UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error + GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. - LogCommands(LogOptions) map[string]string + LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error - GetKubeletStatus() (string, error) - GetAPIServerStatus(net.IP, int) (string, error) + GetAPIServerStatus(string, int) (string, error) } const ( diff --git a/pkg/minikube/bootstrapper/bsutil/binaries.go b/pkg/minikube/bootstrapper/bsutil/binaries.go index 32b9a166a2..3eecdf905c 100644 --- a/pkg/minikube/bootstrapper/bsutil/binaries.go +++ b/pkg/minikube/bootstrapper/bsutil/binaries.go @@ -32,11 +32,12 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/sysinit" "k8s.io/minikube/pkg/minikube/vmpath" ) // TransferBinaries transfers all required Kubernetes binaries -func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error { +func TransferBinaries(cfg config.KubernetesConfig, c command.Runner, sm sysinit.Manager) error { ok, err := binariesExist(cfg, c) if err == nil && ok { glog.Info("Found k8s binaries, skipping transfer") @@ -59,6 +60,12 @@ func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error { return errors.Wrapf(err, "downloading %s", name) } + if name == "kubelet" && sm.Active(name) { + if err := sm.ForceStop(name); err != nil { + glog.Errorf("unable to stop kubelet: %v", err) + } + } + dst := path.Join(dir, name) if err := machine.CopyBinary(c, src, dst); err != nil { return errors.Wrapf(err, "copybinary %s -> %s", src, dst) diff --git a/pkg/minikube/bootstrapper/bsutil/files.go b/pkg/minikube/bootstrapper/bsutil/files.go index c184eca3c9..8d6bc05bc3 100644 --- a/pkg/minikube/bootstrapper/bsutil/files.go +++ b/pkg/minikube/bootstrapper/bsutil/files.go @@ -20,8 +20,6 @@ package bsutil import ( "path" - "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/vmpath" ) @@ -35,20 +33,8 @@ const ( KubeletServiceFile = "/lib/systemd/system/kubelet.service" // KubeletSystemdConfFile is config for the systemd kubelet.service KubeletSystemdConfFile = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + // InitRestartWrapper is ... 
+ InitRestartWrapper = "/etc/init.d/.restart_wrapper.sh" + // KubeletInitPath is where Sys-V style init script is installed + KubeletInitPath = "/etc/init.d/kubelet" ) - -// ConfigFileAssets returns configuration file assets -func ConfigFileAssets(cfg config.KubernetesConfig, kubeadm []byte, kubelet []byte, kubeletSvc []byte, defaultCNIConfig []byte) []assets.CopyableFile { - fs := []assets.CopyableFile{ - assets.NewMemoryAssetTarget(kubeadm, KubeadmYamlPath, "0640"), - assets.NewMemoryAssetTarget(kubelet, KubeletSystemdConfFile, "0644"), - assets.NewMemoryAssetTarget(kubeletSvc, KubeletServiceFile, "0644"), - } - // Copy the default CNI config (k8s.conf), so that kubelet can successfully - // start a Pod in the case a user hasn't manually installed any CNI plugin - // and minikube was started with "--extra-config=kubelet.network-plugin=cni". - if defaultCNIConfig != nil { - fs = append(fs, assets.NewMemoryAssetTarget(defaultCNIConfig, DefaultCNIConfigPath, "0644")) - } - return fs -} diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go index 4c534c161e..a459bb8a27 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go @@ -64,6 +64,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go index 1aca76201a..cbd2ec6d20 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta1.go @@ -73,6 +73,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go index 84c06b61ce..8655dfa7df 100644 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go +++ b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1beta2.go @@ -56,7 +56,7 @@ kind: ClusterConfiguration {{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}} {{end -}}{{end -}} certificatesDir: {{.CertDir}} -clusterName: kubernetes +clusterName: mk controlPlaneEndpoint: {{.ControlPlaneAddress}}:{{.APIServerPort}} dns: type: CoreDNS @@ -71,6 +71,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 80c4471175..5df897dbb6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -37,8 +37,8 @@ import ( const remoteContainerRuntime = "remote" // GenerateKubeadmYAML generates the kubeadm.yaml file -func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) { - k8s := mc.KubernetesConfig +func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) { + k8s := cc.KubernetesConfig version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return nil, errors.Wrap(err, "parsing kubernetes version") @@ -51,7 +51,7 @@ func 
GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N } // In case of no port assigned, use default - cp, err := config.PrimaryControlPlane(&mc) + cp, err := config.PrimaryControlPlane(&cc) if err != nil { return nil, errors.Wrap(err, "getting control plane") } @@ -87,22 +87,21 @@ func GenerateKubeadmYAML(mc config.ClusterConfig, r cruntime.Manager, n config.N CertDir: vmpath.GuestKubernetesCertsDir, ServiceCIDR: constants.DefaultServiceCIDR, PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm), - AdvertiseAddress: cp.IP, + AdvertiseAddress: n.IP, APIServerPort: nodePort, KubernetesVersion: k8s.KubernetesVersion, EtcdDataDir: EtcdDataDir(), - ClusterName: k8s.ClusterName, - NodeName: cp.Name, - CRISocket: r.SocketPath(), - ImageRepository: k8s.ImageRepository, - ComponentOptions: componentOpts, - FeatureArgs: kubeadmFeatureArgs, - NoTaintMaster: false, // That does not work with k8s 1.12+ - DNSDomain: k8s.DNSDomain, - NodeIP: n.IP, - // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart - // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files - ControlPlaneAddress: "localhost", + ClusterName: cc.Name, + //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine + NodeName: KubeNodeName(cc, n), + CRISocket: r.SocketPath(), + ImageRepository: k8s.ImageRepository, + ComponentOptions: componentOpts, + FeatureArgs: kubeadmFeatureArgs, + NoTaintMaster: false, // That does not work with k8s 1.12+ + DNSDomain: k8s.DNSDomain, + NodeIP: n.IP, + ControlPlaneAddress: cp.IP, } if k8s.ServiceCIDR != "" { diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index ef87c48e99..cd39751c1b 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -113,7 +113,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"dns", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, + {"dns", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{DNSDomain: "1.1.1.1"}}}, } for _, version := range versions { for _, tc := range tests { @@ -134,7 +134,7 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } @@ -179,15 +179,15 @@ func TestGenerateKubeadmYAML(t *testing.T) { shouldErr bool cfg config.ClusterConfig }{ - {"default", "docker", false, config.ClusterConfig{}}, - {"containerd", "containerd", false, config.ClusterConfig{}}, - {"crio", "crio", false, config.ClusterConfig{}}, - {"options", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, - {"crio-options-gates", "crio", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, - {"unknown-component", "docker", true, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, - {"containerd-api-port", 
"containerd", false, config.ClusterConfig{Nodes: []config.Node{{Port: 12345}}}}, - {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, - {"image-repository", "docker", false, config.ClusterConfig{KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, + {"default", "docker", false, config.ClusterConfig{Name: "mk"}}, + {"containerd", "containerd", false, config.ClusterConfig{Name: "mk"}}, + {"crio", "crio", false, config.ClusterConfig{Name: "mk"}}, + {"options", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts}}}, + {"crio-options-gates", "crio", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}}, + {"unknown-component", "docker", true, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: config.ExtraOptionSlice{config.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}}, + {"containerd-api-port", "containerd", false, config.ClusterConfig{Name: "mk", Nodes: []config.Node{{Port: 12345}}}}, + {"containerd-pod-network-cidr", "containerd", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ExtraOptions: extraOptsPodCidr}}}, + {"image-repository", "docker", false, config.ClusterConfig{Name: "mk", KubernetesConfig: config.KubernetesConfig{ImageRepository: "test/repo"}}}, } for _, version := range versions { for _, tc := range tests { @@ -215,7 +215,7 @@ func TestGenerateKubeadmYAML(t *testing.T) { cfg.KubernetesConfig.KubernetesVersion = version + ".0" cfg.KubernetesConfig.ClusterName = "kubernetes" - got, err := GenerateKubeadmYAML(cfg, runtime, cfg.Nodes[0]) + got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime) if err != nil && !tc.shouldErr { t.Fatalf("got unexpected error generating config: %v", err) } diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 8ec9d01fc6..1ed22d17c6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -19,6 +19,7 @@ package bsutil import ( "bytes" + "os" "path" "github.com/pkg/errors" @@ -26,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/util" ) @@ -59,8 +61,9 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - if nc.Name != "" { - extraOpts["hostname-override"] = nc.Name + if _, ok := extraOpts["hostname-override"]; !ok { + nodeName := KubeNodeName(mc, nc) + extraOpts["hostname-override"] = nodeName } pauseImage := images.Pause(version, k8s.ImageRepository) @@ -115,3 +118,13 @@ func NewKubeletService(cfg config.KubernetesConfig) ([]byte, error) { } return b.Bytes(), nil } + +// KubeNodeName returns the node name registered in Kubernetes +func KubeNodeName(cc config.ClusterConfig, n config.Node) string { + if cc.Driver == driver.None { + // Always use hostname for "none" driver + hostname, _ := os.Hostname() + return hostname + } + return driver.MachineName(cc, n) +} diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 3019ee1f52..660b8e5b91 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -37,6 +37,7 @@ func TestGenerateKubeletConfig(t *testing.T) { { description: "old docker", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.OldestKubernetesVersion, ContainerRuntime: "docker", @@ -62,6 +63,7 @@ ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true -- { description: "newest cri runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.NewestKubernetesVersion, ContainerRuntime: "cri-o", @@ -79,7 +81,7 @@ Wants=crio.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -87,6 +89,7 @@ ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode { description: "default containerd runtime", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -104,7 +107,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf 
--node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -112,6 +115,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "default containerd runtime with IP override", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "containerd", @@ -136,7 +140,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -144,6 +148,7 @@ ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhoo { description: "docker with custom image repository", cfg: config.ClusterConfig{ + Name: "minikube", KubernetesConfig: config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, ContainerRuntime: "docker", @@ -162,7 +167,7 @@ Wants=docker.socket [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go new file mode 100644 index 0000000000..6aa6763a26 --- /dev/null +++ 
b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go @@ -0,0 +1,206 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "os/exec" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/machine/libmachine/state" + "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cruntime" +) + +// WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't +func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error { + glog.Infof("waiting for apiserver process to appear ...") + err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during process check") + } + + if time.Since(start) > minLogCheckTime { + announceProblems(r, bs, cfg, cr) + time.Sleep(kconst.APICallRetryInterval * 5) + } + + if _, ierr := apiServerPID(cr); ierr != nil { + return false, nil + } + + return true, nil + }) + if err != nil { + return fmt.Errorf("apiserver process never appeared") + } + glog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start)) + return nil +} + +// apiServerPID returns our best guess to the apiserver pid +func apiServerPID(cr command.Runner) (int, error) { + rr, err := cr.RunCmd(exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*")) + if err != nil { + return 0, err + } + s := strings.TrimSpace(rr.Stdout.String()) + return strconv.Atoi(s) +} + +// WaitForHealthyAPIServer waits for api server status to be running +func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, hostname string, port int, timeout time.Duration) error { + glog.Infof("waiting for apiserver healthz status ...") + hStart := time.Now() + + healthz := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during healthz check") + } + + if time.Since(start) > minLogCheckTime { + announceProblems(r, bs, cfg, cr) + time.Sleep(kconst.APICallRetryInterval * 5) + } + + status, err := apiServerHealthz(hostname, port) + if err != nil { + glog.Warningf("status: %v", err) + return false, nil + } + if status != state.Running { + return false, nil + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); 
err != nil { + return fmt.Errorf("apiserver healthz never reported healthy") + } + + vcheck := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during version check") + } + if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil { + glog.Warningf("api server version match failed: %v", err) + return false, nil + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil { + return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion) + } + + glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart)) + return nil +} + +// APIServerVersionMatch checks if the server version matches the expected +func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error { + vi, err := client.ServerVersion() + if err != nil { + return errors.Wrap(err, "server version") + } + glog.Infof("control plane version: %s", vi) + if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 { + return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected) + } + return nil +} + +// APIServerStatus returns apiserver status in libmachine style state.State +func APIServerStatus(cr command.Runner, hostname string, port int) (state.State, error) { + glog.Infof("Checking apiserver status ...") + + pid, err := apiServerPID(cr) + if err != nil { + glog.Warningf("stopped: unable to get apiserver pid: %v", err) + return state.Stopped, nil + } + + // Get the freezer cgroup entry for this pid + rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid))) + if err != nil { + glog.Warningf("unable to find freezer cgroup: %v", err) + return apiServerHealthz(hostname, port) + + } + freezer := strings.TrimSpace(rr.Stdout.String()) + glog.Infof("apiserver freezer: %q", freezer) + fparts := strings.Split(freezer, ":") + if len(fparts) != 3 { + glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer) + return apiServerHealthz(hostname, port) + } + + rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state"))) + if err != nil { + glog.Errorf("unable to get freezer state: %s", rr.Stderr.String()) + return apiServerHealthz(hostname, port) + } + + fs := strings.TrimSpace(rr.Stdout.String()) + glog.Infof("freezer state: %q", fs) + if fs == "FREEZING" || fs == "FROZEN" { + return state.Paused, nil + } + return apiServerHealthz(hostname, port) +} + +// apiServerHealthz hits the /healthz endpoint and returns libmachine style state.State +func apiServerHealthz(hostname string, port int) (state.State, error) { + url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(hostname, fmt.Sprint(port))) + glog.Infof("Checking apiserver healthz at %s ...", url) + // To avoid: x509: certificate signed by unknown authority + tr := &http.Transport{ + Proxy: nil, // To avoid connectiv issue if http(s)_proxy is set. + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + resp, err := client.Get(url) + // Connection refused, usually. + if err != nil { + glog.Infof("stopped: %s: %v", url, err) + return state.Stopped, nil + } + if resp.StatusCode == http.StatusUnauthorized { + glog.Errorf("%s returned code %d (unauthorized). 
Please ensure that your apiserver authorization settings make sense!", url, resp.StatusCode) + return state.Error, nil + } + if resp.StatusCode != http.StatusOK { + glog.Warningf("%s response: %v %+v", url, err, resp) + return state.Error, nil + } + return state.Running, nil +} diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go new file mode 100644 index 0000000000..453edff703 --- /dev/null +++ b/pkg/minikube/bootstrapper/bsutil/kverify/default_sa.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/minikube/pkg/util/retry" +) + +// WaitForDefaultSA waits for the default service account to be created. +func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error { + glog.Info("waiting for default service account to be created ...") + start := time.Now() + saReady := func() error { + // equivalent to manual check of 'kubectl --context profile get serviceaccount default' + sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{}) + if err != nil { + glog.Infof("temproary error waiting for default SA: %v", err) + return err + } + for _, sa := range sas.Items { + if sa.Name == "default" { + glog.Infof("found service account: %q", sa.Name) + return nil + } + } + return fmt.Errorf("couldn't find default service account") + } + if err := retry.Expo(saReady, 500*time.Millisecond, timeout); err != nil { + return errors.Wrapf(err, "waited %s for SA", time.Since(start)) + } + + glog.Infof("duration metric: took %s for default service account to be created ...", time.Since(start)) + return nil +} diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index aa076cecb9..f4486196c3 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2020 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,187 +18,52 @@ limitations under the License. 
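
[Editor's note, not part of the patch] The new `kverify/api_server.go` above decides apiserver state from a `/healthz` probe: connection errors map to stopped, HTTP 401 and other non-200 responses to error, and 200 to running. A standalone sketch of that probe logic under the same assumptions (self-signed certs, proxies ignored); the plain state strings stand in for libmachine's `state.State`, and the endpoint in `main` is hypothetical.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
)

// probeHealthz mirrors the apiServerHealthz logic above: connection errors
// are treated as "Stopped", 401 (misconfigured authorization) and any other
// non-200 status as "Error", and 200 as "Running".
func probeHealthz(hostname string, port int) string {
	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(hostname, fmt.Sprint(port)))
	tr := &http.Transport{
		Proxy:           nil, // avoid interference from http(s)_proxy, as the patch does
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	resp, err := (&http.Client{Transport: tr}).Get(url)
	if err != nil {
		return "Stopped"
	}
	defer resp.Body.Close()
	switch {
	case resp.StatusCode == http.StatusUnauthorized:
		return "Error"
	case resp.StatusCode != http.StatusOK:
		return "Error"
	default:
		return "Running"
	}
}

func main() {
	// Hypothetical control plane endpoint; with a real cluster this would be
	// the control plane IP and apiserver port.
	fmt.Println(probeHealthz("192.168.99.100", 8443))
}
```
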
package kverify import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "os/exec" - "path" - "strconv" - "strings" "time" - - "github.com/docker/machine/libmachine/state" - "github.com/golang/glog" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/minikube/pkg/minikube/command" ) -// APIServerProcess waits for api server to be healthy returns error if it doesn't -func APIServerProcess(runner command.Runner, start time.Time, timeout time.Duration) error { - glog.Infof("waiting for apiserver process to appear ...") - err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) { - if time.Since(start) > timeout { - return false, fmt.Errorf("cluster wait timed out during process check") - } +// minLogCheckTime how long to wait before spamming error logs to console +const minLogCheckTime = 60 * time.Second - if _, ierr := apiServerPID(runner); ierr != nil { - return false, nil - } - return true, nil - }) - if err != nil { - return fmt.Errorf("apiserver process never appeared") +const ( + // APIServerWaitKey is the name used in the flags for k8s api server + APIServerWaitKey = "apiserver" + // SystemPodsWaitKey is the name used in the flags for pods in the kube system + SystemPodsWaitKey = "system_pods" + // DefaultSAWaitKey is the name used in the flags for default service account + DefaultSAWaitKey = "default_sa" + // AppsRunning is the name used in the flags for waiting for k8s-apps to be running + AppsRunning = "apps_running" +) + +// vars related to the --wait flag +var ( + // DefaultComponents is map of the the default components to wait for + DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true} + // NoWaitComponents is map of componets to wait for if specified 'none' or 'false' + NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false, AppsRunning: false} + // AllComponents is map for waiting for all components. + AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true, AppsRunning: true} + // DefaultWaitList is list of all default components to wait for. only names to be used for start flags. + DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey} + // AllComponentsList list of all valid components keys to wait for. only names to be used used for start flags. 
+ AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey, AppsRunning} + // AppsRunningList running list are valid k8s-app components to wait for them to be running + AppsRunningList = []string{ + "kube-dns", // coredns + "etcd", + "kube-apiserver", + "kube-controller-manager", + "kube-proxy", + "kube-scheduler", } - glog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start)) - return nil -} - -// apiServerPID returns our best guess to the apiserver pid -func apiServerPID(cr command.Runner) (int, error) { - rr, err := cr.RunCmd(exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*")) - if err != nil { - return 0, err - } - s := strings.TrimSpace(rr.Stdout.String()) - return strconv.Atoi(s) -} - -// SystemPods verifies essential pods for running kurnetes is running -func SystemPods(client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { - glog.Info("waiting for kube-system pods to appear ...") - pStart := time.Now() - podList := func() (bool, error) { - if time.Since(start) > timeout { - return false, fmt.Errorf("cluster wait timed out during pod check") - } - // Wait for any system pod, as waiting for apiserver may block until etcd - pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{}) - if err != nil { - glog.Warningf("pod list returned error: %v", err) - return false, nil - } - glog.Infof("%d kube-system pods found", len(pods.Items)) - if len(pods.Items) < 2 { - return false, nil - } - return true, nil - } - if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, podList); err != nil { - return fmt.Errorf("apiserver never returned a pod list") - } - glog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart)) - return nil -} - -// APIServerIsRunning waits for api server status to be running -func APIServerIsRunning(start time.Time, ip string, port int, timeout time.Duration) error { - glog.Infof("waiting for apiserver healthz status ...") - hStart := time.Now() - healthz := func() (bool, error) { - if time.Since(start) > timeout { - return false, fmt.Errorf("cluster wait timed out during healthz check") - } - - status, err := apiServerHealthz(net.ParseIP(ip), port) - if err != nil { - glog.Warningf("status: %v", err) - return false, nil - } - if status != state.Running { - return false, nil - } - return true, nil - } - - if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil { - return fmt.Errorf("apiserver healthz never reported healthy") - } - glog.Infof("duration metric: took %s to wait for apiserver healthz status ...", time.Since(hStart)) - return nil -} - -// APIServerStatus returns apiserver status in libmachine style state.State -func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error) { - glog.Infof("Checking apiserver status ...") - - pid, err := apiServerPID(cr) - if err != nil { - glog.Warningf("unable to get apiserver pid: %v", err) - return state.Stopped, nil - } - - // Get the freezer cgroup entry for this pid - rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid))) - if err != nil { - glog.Warningf("unable to find freezer cgroup: %v", err) - return apiServerHealthz(ip, port) - - } - freezer := strings.TrimSpace(rr.Stdout.String()) - glog.Infof("apiserver freezer: %q", freezer) - fparts := strings.Split(freezer, ":") - if len(fparts) != 3 { 
- glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer) - return apiServerHealthz(ip, port) - } - - rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state"))) - if err != nil { - glog.Errorf("unable to get freezer state: %s", rr.Stderr.String()) - return apiServerHealthz(ip, port) - } - - fs := strings.TrimSpace(rr.Stdout.String()) - glog.Infof("freezer state: %q", fs) - if fs == "FREEZING" || fs == "FROZEN" { - return state.Paused, nil - } - return apiServerHealthz(ip, port) -} - -// apiServerHealthz hits the /healthz endpoint and returns libmachine style state.State -func apiServerHealthz(ip net.IP, port int) (state.State, error) { - url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(ip.String(), fmt.Sprint(port))) - glog.Infof("Checking apiserver healthz at %s ...", url) - // To avoid: x509: certificate signed by unknown authority - tr := &http.Transport{ - Proxy: nil, // To avoid connectiv issue if http(s)_proxy is set. - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - resp, err := client.Get(url) - // Connection refused, usually. - if err != nil { - return state.Stopped, nil - } - if resp.StatusCode != http.StatusOK { - glog.Warningf("%s response: %v %+v", url, err, resp) - return state.Error, nil - } - return state.Running, nil -} - -// KubeletStatus checks the kubelet status -func KubeletStatus(cr command.Runner) (state.State, error) { - glog.Infof("Checking kubelet status ...") - rr, err := cr.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet")) - if err != nil { - // Do not return now, as we still have parsing to do! - glog.Warningf("%s returned error: %v", rr.Command(), err) - } - s := strings.TrimSpace(rr.Stdout.String()) - glog.Infof("kubelet is-active: %s", s) - switch s { - case "active": - return state.Running, nil - case "inactive": - return state.Stopped, nil - case "activating": - return state.Starting, nil - } - return state.Error, nil +) + +// ShouldWait will return true if the config says need to wait +func ShouldWait(wcs map[string]bool) bool { + for _, c := range AllComponentsList { + if wcs[c] { + return true + } + } + return false } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go b/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go new file mode 100644 index 0000000000..35dd40a858 --- /dev/null +++ b/pkg/minikube/bootstrapper/bsutil/kverify/system_pods.go @@ -0,0 +1,166 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package kverify verifies a running kubernetes cluster is healthy +package kverify + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/machine/libmachine/state" + "github.com/golang/glog" + "github.com/pkg/errors" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/sysinit" +) + +// WaitForSystemPods verifies essential pods for running kurnetes is running +func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error { + glog.Info("waiting for kube-system pods to appear ...") + pStart := time.Now() + + podList := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during pod check") + } + if time.Since(start) > minLogCheckTime { + announceProblems(r, bs, cfg, cr) + time.Sleep(kconst.APICallRetryInterval * 5) + } + + // Wait for any system pod, as waiting for apiserver may block until etcd + pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{}) + if err != nil { + glog.Warningf("pod list returned error: %v", err) + return false, nil + } + glog.Infof("%d kube-system pods found", len(pods.Items)) + for _, pod := range pods.Items { + glog.Infof(podStatusMsg(pod)) + } + + if len(pods.Items) < 2 { + return false, nil + } + return true, nil + } + if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, podList); err != nil { + return fmt.Errorf("apiserver never returned a pod list") + } + glog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart)) + return nil +} + +// ExpectAppsRunning returns whether or not all expected k8s-apps are running. 
(without waiting for them) +func ExpectAppsRunning(cs *kubernetes.Clientset, expected []string) error { + found := map[string]bool{} + + pods, err := cs.CoreV1().Pods("kube-system").List(meta.ListOptions{}) + if err != nil { + return err + } + + for _, pod := range pods.Items { + if pod.Status.Phase != core.PodRunning { + continue + } + for k, v := range pod.ObjectMeta.Labels { + if k == "component" || k == "k8s-app" { + found[v] = true + } + } + } + + missing := []string{} + for _, e := range expected { + if !found[e] { + missing = append(missing, e) + } + } + if len(missing) > 0 { + return fmt.Errorf("missing components: %v", strings.Join(missing, ", ")) + } + return nil +} + +// WaitForAppsRunning waits for expected Apps To be running +func WaitForAppsRunning(cs *kubernetes.Clientset, expected []string, timeout time.Duration) error { + glog.Info("waiting for k8s-apps to be running ...") + start := time.Now() + + checkRunning := func() (bool, error) { + if err := ExpectAppsRunning(cs, expected); err != nil { + return false, nil + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, checkRunning); err != nil { + return errors.Wrapf(err, "checking k8s-apps to be running") + } + glog.Infof("duration metric: took %s to wait for k8s-apps to be running ...", time.Since(start)) + return nil +} + +// podStatusMsg returns a human-readable pod status, for generating debug status +func podStatusMsg(pod core.Pod) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%q [%s] %s", pod.ObjectMeta.GetName(), pod.ObjectMeta.GetUID(), pod.Status.Phase)) + for i, c := range pod.Status.Conditions { + if c.Reason != "" { + if i == 0 { + sb.WriteString(": ") + } else { + sb.WriteString(" / ") + } + sb.WriteString(fmt.Sprintf("%s:%s", c.Type, c.Reason)) + } + if c.Message != "" { + sb.WriteString(fmt.Sprintf(" (%s)", c.Message)) + } + } + return sb.String() +} + +// announceProblems checks for problems, and slows polling down if any are found +func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner) { + problems := logs.FindProblems(r, bs, cfg, cr) + if len(problems) > 0 { + logs.OutputProblems(problems, 5) + time.Sleep(kconst.APICallRetryInterval * 15) + } +} + +// KubeletStatus checks the kubelet status +func KubeletStatus(cr command.Runner) state.State { + glog.Infof("Checking kubelet status ...") + active := sysinit.New(cr).Active("kubelet") + if active { + return state.Running + } + return state.Stopped +} diff --git a/pkg/minikube/bootstrapper/bsutil/ops.go b/pkg/minikube/bootstrapper/bsutil/ops.go index bf855a9210..d364aa0748 100644 --- a/pkg/minikube/bootstrapper/bsutil/ops.go +++ b/pkg/minikube/bootstrapper/bsutil/ops.go @@ -47,7 +47,7 @@ func AdjustResourceLimits(c command.Runner) error { return nil } -// ExistingConfig checks if there are config files from possible previous kubernets cluster +// ExistingConfig checks if there are config files from possible previous kubernetes cluster func ExistingConfig(c command.Runner) error { args := append([]string{"ls"}, expectedRemoteArtifacts...) 
_, err := c.RunCmd(exec.Command("sudo", args...)) diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml index 7d94020c6f..ae79c8aa7a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-api-port.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 12345 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd-pod-network-cidr.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml index f66eec734e..a8ce3c8dc7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/containerd.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml index 30b1986325..1a4d370e84 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio-options-gates.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml index 4693643125..e179fbf4e3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/crio.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml index 5c2861101e..68429da7bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/default.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml index 7d383865f8..651706493c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/image-repository.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml index 26fbfead4b..5b192e1cfd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.11/options.yaml @@ -4,7 +4,7 @@ noTaintMaster: true api: advertiseAddress: 1.1.1.1 bindPort: 8443 - controlPlaneEndpoint: localhost + controlPlaneEndpoint: 1.1.1.1 kubernetesVersion: v1.11.0 certificatesDir: /var/lib/minikube/certs networking: diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml index ba34af30df..e7ae37e44a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml index 0d821692e5..d49b9ec306 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml index 8ac889649f..e8bc0ce7da 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml index 5fb536a9f5..71c60951ac 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -44,6 +44,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml index a2e258468b..1f79aeb4d6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml index 6db4345453..5588fe6c75 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml index e0b60901ab..7092fb6277 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml index 595bd0c94c..a22a4d75cc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -37,6 +37,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml index 04237f4db1..1303e6b6ac 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.12/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -41,6 +41,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: 
nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml index e4e9c885b2..4389e10b5a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-api-port.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:12345 +controlPlaneEndpoint: 1.1.1.1:12345 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml index ee58cf2201..71d7a676be 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd-pod-network-cidr.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml index a719307679..485394334f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/containerd.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml index be69a16ec7..4fa17efa5e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio-options-gates.yaml @@ -30,9 +30,9 @@ schedulerExtraArgs: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -44,6 +44,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml index c195ffc2ba..f5daeef55d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/crio.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml index f7fc9b5199..d99a3fbfaf 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/default.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml index d9bb198b8f..23bac82149 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/dns.yaml @@ -22,9 +22,9 @@ kind: ClusterConfiguration apiServerExtraArgs: enable-admission-plugins: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -36,6 +36,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml index 0a1e7bab7b..924f4fc5a9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/image-repository.yaml @@ -23,9 +23,9 @@ imageRepository: test/repo apiServerExtraArgs: enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -37,6 +37,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml index 3aa0b74754..3d9770bed6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.13/options.yaml @@ -27,9 +27,9 @@ controllerManagerExtraArgs: schedulerExtraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes +clusterName: mk apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"] -controlPlaneEndpoint: localhost:8443 +controlPlaneEndpoint: 1.1.1.1:8443 etcd: local: dataDir: /var/lib/minikube/etcd @@ -41,6 +41,8 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml index 741ad12afb..fc26257b55 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: 
kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml index 54abf05793..9608b92860 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml index df4740aaeb..c3a54825dd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml index 513e1f803a..7b43973e7d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -51,6 +51,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml index 1053c5c42f..fa5b62cb02 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: 
/var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml index 117c9070bf..84f68f98fc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml index 67c0df83a3..8791cf879f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml index c720ebac42..b1c12c218e 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -42,6 +42,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml index 35aa4982b2..72302c5654 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.14/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -48,6 +48,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml index 3048061426..307f517ace 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml index 3a180ccafe..9a0077f387 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml index 75a083a4ce..3fee964be4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml index 587faaf4de..b07e909cc2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -51,6 +51,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml index 680b24fe8d..c30eed071b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml index 4ac5254431..99c6275ebd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml index 2403f96063..7a41b4cd55 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource 
management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml index 9e3d3e5088..053a5977f9 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -42,6 +42,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml index cf7d8c2964..bac9bdd0c1 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.15/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -48,6 +48,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml index 2f1d050a40..1ac14c682a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml index ad749f03cc..0a43dd453b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes 
-controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml index ddc2d7cf74..13ebbb05a2 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml index adbc88e1d7..56388066f7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -51,6 +51,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml index d401b50e81..70d0f5219b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml index bf4ee2a96a..fd99dfc616 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml index 9b464ae194..8c880bada8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -41,6 +41,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml index 140db5ca32..919063dda8 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -42,6 +42,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml index c7623c0e0f..bf5753a224 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.16/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -48,6 +48,7 @@ networking: --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +# disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml index cc6ffe6f0b..43b450f5c0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml 
+++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml index e2c0561977..9cc48c0109 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "192.168.32.0/20" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml index 966a8bd993..fd4f8c324d 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml index e8cbd19ca9..658763f28f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -46,3 +46,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml index 7be7372044..f4c255c0c1 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml index 92d300e316..03b1632e50 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml index aaed488d7b..bf54c68796 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: 1.1.1.1 podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml index 29539e671c..607c43532c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -37,3 +37,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml index 34ceceafab..2dfa4b76fd 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.17/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -43,3 +43,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml index 8b43a8ff90..376b6dd2fa 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml index df0718542e..910e9e9596 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "192.168.32.0/20" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml index 5ad344a4ae..30b062ee58 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml index f0b4a18869..2f787eb95f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -46,3 +46,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml index 87cd69c53c..ab798b8bf7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml index 9e2c74b129..12ae87fcd3 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml index 5c654c39cb..abb27ec9fb 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: 1.1.1.1 podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml index afcdcea565..c22581b268 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -37,3 +37,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml index 014ce83eef..65e991dbce 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.18/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -43,3 +43,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml index 74fd2313e6..5c7c731951 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml +++ 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-api-port.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:12345 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:12345 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml index 79de1f5123..833a7ce604 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd-pod-network-cidr.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "192.168.32.0/20" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml index 64df350765..6e2bca9c65 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/containerd.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml index be8c0aec3c..9183325cff 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio-options-gates.yaml @@ -34,8 +34,8 @@ scheduler: feature-gates: "a=b" scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -46,3 +46,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml index f0f0d6318d..7c8a91362f 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/crio.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml index cfe2ec33de..efe78f9ea4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/default.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml index dbbd63b757..c771a624ee 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/dns.yaml @@ -24,8 +24,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -36,3 +36,16 @@ networking: dnsDomain: 1.1.1.1 podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml index 2abe0945cf..0d9e4b7b4a 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/image-repository.yaml @@ -25,8 +25,8 @@ apiServer: extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -37,3 +37,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml index c35c0ea588..c288ba36c4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.19/options.yaml @@ -31,8 +31,8 @@ scheduler: extraArgs: scheduler-name: "mini-scheduler" certificatesDir: /var/lib/minikube/certs -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 +clusterName: mk +controlPlaneEndpoint: 1.1.1.1:8443 dns: type: CoreDNS etcd: @@ -43,3 +43,16 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +# disable disk resource management by default +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metricsBindAddress: 1.1.1.1:10249 diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 652392cb65..3099a29b32 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -17,6 +17,7 @@ limitations under the License. 
package bootstrapper import ( + "crypto/sha1" "encoding/pem" "fmt" "io/ioutil" @@ -25,9 +26,11 @@ import ( "os/exec" "path" "path/filepath" + "sort" "strings" "github.com/golang/glog" + "github.com/otiai10/copy" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd/api" @@ -40,63 +43,50 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" - "k8s.io/minikube/pkg/util/lock" - - "github.com/juju/mutex" -) - -var ( - certs = []string{ - "ca.crt", "ca.key", "apiserver.crt", "apiserver.key", "proxy-client-ca.crt", - "proxy-client-ca.key", "proxy-client.crt", "proxy-client.key", - } ) // SetupCerts gets the generated credentials required to talk to the APIServer. -func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) error { - - localPath := localpath.MiniPath() +func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) { + localPath := localpath.Profile(k8s.ClusterName) glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP) - // WARNING: This function was not designed for multiple profiles, so it is VERY racey: - // - // It updates a shared certificate file and uploads it to the apiserver before launch. - // - // If another process updates the shared certificate, it's invalid. - // TODO: Instead of racey manipulation of a shared certificate, use per-profile certs - spec := lock.PathMutexSpec(filepath.Join(localPath, "certs")) - glog.Infof("acquiring lock: %+v", spec) - releaser, err := mutex.Acquire(spec) + ccs, err := generateSharedCACerts() if err != nil { - return errors.Wrapf(err, "unable to acquire lock for %+v", spec) + return nil, errors.Wrap(err, "shared CA certs") } - defer releaser.Release() - if err := generateCerts(k8s, n); err != nil { - return errors.Wrap(err, "Error generating certs") + xfer, err := generateProfileCerts(k8s, n, ccs) + if err != nil { + return nil, errors.Wrap(err, "profile certs") } + + xfer = append(xfer, ccs.caCert) + xfer = append(xfer, ccs.caKey) + xfer = append(xfer, ccs.proxyCert) + xfer = append(xfer, ccs.proxyKey) + copyableFiles := []assets.CopyableFile{} - for _, cert := range certs { - p := filepath.Join(localPath, cert) + for _, p := range xfer { + cert := filepath.Base(p) perms := "0644" if strings.HasSuffix(cert, ".key") { perms = "0600" } certFile, err := assets.NewFileAsset(p, vmpath.GuestKubernetesCertsDir, cert, perms) if err != nil { - return err + return nil, errors.Wrapf(err, "key asset %s", cert) } copyableFiles = append(copyableFiles, certFile) } caCerts, err := collectCACerts() if err != nil { - return err + return nil, err } for src, dst := range caCerts { certFile, err := assets.NewFileAsset(src, path.Dir(dst), path.Base(dst), "0644") if err != nil { - return err + return nil, errors.Wrapf(err, "ca asset %s", src) } copyableFiles = append(copyableFiles, certFile) @@ -114,58 +104,89 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) kubeCfg := api.NewConfig() err = kubeconfig.PopulateFromSettings(kcs, kubeCfg) if err != nil { - return errors.Wrap(err, "populating kubeconfig") + return nil, errors.Wrap(err, "populating kubeconfig") } data, err := runtime.Encode(latest.Codec, kubeCfg) if err != nil { - return errors.Wrap(err, "encoding kubeconfig") + return nil, errors.Wrap(err, "encoding kubeconfig") } - kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") - copyableFiles = 
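Note: the rewritten SetupCerts splits certificate material between machine-wide CAs (ca.crt/ca.key and the proxy-client CA under the minikube home) and per-profile leaf certs under localpath.Profile(ClusterName), and it now returns the files it copied. The sketch below only illustrates that layout; the profiles/<name> subdirectory is an assumption standing in for whatever localpath.Profile actually resolves to.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// certLayout sketches the split between shared CA material and per-profile
// certs. minikubeHome stands in for localpath.MiniPath(); the "profiles"
// subdirectory is an assumed stand-in for localpath.Profile(clusterName).
func certLayout(minikubeHome, clusterName string) (shared, profile []string) {
	shared = []string{
		filepath.Join(minikubeHome, "ca.crt"), // shared client/apiserver CA
		filepath.Join(minikubeHome, "ca.key"),
		filepath.Join(minikubeHome, "proxy-client-ca.crt"), // shared front-proxy CA
		filepath.Join(minikubeHome, "proxy-client-ca.key"),
	}
	profileDir := filepath.Join(minikubeHome, "profiles", clusterName)
	profile = []string{
		filepath.Join(profileDir, "apiserver.crt"),
		filepath.Join(profileDir, "apiserver.key"),
		filepath.Join(profileDir, "proxy-client.crt"),
		filepath.Join(profileDir, "proxy-client.key"),
	}
	return shared, profile
}

func main() {
	shared, profile := certLayout("/home/user/.minikube", "p1")
	fmt.Println("shared CA material:", shared)
	fmt.Println("per-profile certs: ", profile)
}
```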
append(copyableFiles, kubeCfgFile) + if n.ControlPlane { + kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") + copyableFiles = append(copyableFiles, kubeCfgFile) + } for _, f := range copyableFiles { if err := cmd.Copy(f); err != nil { - return errors.Wrapf(err, "Copy %s", f.GetAssetName()) + return nil, errors.Wrapf(err, "Copy %s", f.GetSourcePath()) } } if err := installCertSymlinks(cmd, caCerts); err != nil { - return errors.Wrapf(err, "certificate symlinks") + return nil, errors.Wrapf(err, "certificate symlinks") } - return nil + return copyableFiles, nil } -func generateCerts(k8s config.KubernetesConfig, n config.Node) error { - serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) - if err != nil { - return errors.Wrap(err, "getting service cluster ip") +// CACerts has cert and key for CA (and Proxy) +type CACerts struct { + caCert string + caKey string + proxyCert string + proxyKey string +} + +// generateSharedCACerts generates CA certs shared among profiles, but only if missing +func generateSharedCACerts() (CACerts, error) { + globalPath := localpath.MiniPath() + cc := CACerts{ + caCert: localpath.CACert(), + caKey: filepath.Join(globalPath, "ca.key"), + proxyCert: filepath.Join(globalPath, "proxy-client-ca.crt"), + proxyKey: filepath.Join(globalPath, "proxy-client-ca.key"), } - localPath := localpath.MiniPath() - caCertPath := filepath.Join(localPath, "ca.crt") - caKeyPath := filepath.Join(localPath, "ca.key") - - proxyClientCACertPath := filepath.Join(localPath, "proxy-client-ca.crt") - proxyClientCAKeyPath := filepath.Join(localPath, "proxy-client-ca.key") - caCertSpecs := []struct { certPath string keyPath string subject string }{ { // client / apiserver CA - certPath: caCertPath, - keyPath: caKeyPath, + certPath: cc.caCert, + keyPath: cc.caKey, subject: "minikubeCA", }, { // proxy-client CA - certPath: proxyClientCACertPath, - keyPath: proxyClientCAKeyPath, + certPath: cc.proxyCert, + keyPath: cc.proxyKey, subject: "proxyClientCA", }, } + for _, ca := range caCertSpecs { + if canRead(ca.certPath) && canRead(ca.keyPath) { + glog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath) + continue + } + + glog.Infof("generating %s CA: %s", ca.subject, ca.keyPath) + if err := util.GenerateCACert(ca.certPath, ca.keyPath, ca.subject); err != nil { + return cc, errors.Wrap(err, "generate ca cert") + } + } + + return cc, nil +} + +// generateProfileCerts generates profile certs for a profile +func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { + profilePath := localpath.Profile(k8s.ClusterName) + + serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) + if err != nil { + return nil, errors.Wrap(err, "getting service cluster ip") + } + apiServerIPs := append( k8s.APIServerIPs, []net.IP{net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...) @@ -174,9 +195,19 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error { apiServerNames, util.GetAlternateDNS(k8s.DNSDomain)...) - signedCertSpecs := []struct { - certPath string - keyPath string + // Generate a hash input for certs that depend on ip/name combinations + hi := []string{} + hi = append(hi, apiServerAlternateNames...) 
+ for _, ip := range apiServerIPs { + hi = append(hi, ip.String()) + } + sort.Strings(hi) + + specs := []struct { + certPath string + keyPath string + hash string + subject string ips []net.IP alternateNames []string @@ -184,56 +215,77 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error { caKeyPath string }{ { // Client cert - certPath: filepath.Join(localPath, "client.crt"), - keyPath: filepath.Join(localPath, "client.key"), + certPath: localpath.ClientCert(k8s.ClusterName), + keyPath: localpath.ClientKey(k8s.ClusterName), subject: "minikube-user", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: caCertPath, - caKeyPath: caKeyPath, + caCertPath: ccs.caCert, + caKeyPath: ccs.caKey, }, { // apiserver serving cert - certPath: filepath.Join(localPath, "apiserver.crt"), - keyPath: filepath.Join(localPath, "apiserver.key"), + hash: fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8], + certPath: filepath.Join(profilePath, "apiserver.crt"), + keyPath: filepath.Join(profilePath, "apiserver.key"), subject: "minikube", ips: apiServerIPs, alternateNames: apiServerAlternateNames, - caCertPath: caCertPath, - caKeyPath: caKeyPath, + caCertPath: ccs.caCert, + caKeyPath: ccs.caKey, }, { // aggregator proxy-client cert - certPath: filepath.Join(localPath, "proxy-client.crt"), - keyPath: filepath.Join(localPath, "proxy-client.key"), + certPath: filepath.Join(profilePath, "proxy-client.crt"), + keyPath: filepath.Join(profilePath, "proxy-client.key"), subject: "aggregator", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: proxyClientCACertPath, - caKeyPath: proxyClientCAKeyPath, + caCertPath: ccs.proxyCert, + caKeyPath: ccs.proxyKey, }, } - for _, caCertSpec := range caCertSpecs { - if !(canReadFile(caCertSpec.certPath) && - canReadFile(caCertSpec.keyPath)) { - if err := util.GenerateCACert( - caCertSpec.certPath, caCertSpec.keyPath, caCertSpec.subject, - ); err != nil { - return errors.Wrap(err, "Error generating CA certificate") + xfer := []string{} + for _, spec := range specs { + if spec.subject != "minikube-user" { + xfer = append(xfer, spec.certPath) + xfer = append(xfer, spec.keyPath) + } + + cp := spec.certPath + kp := spec.keyPath + if spec.hash != "" { + cp = cp + "." + spec.hash + kp = kp + "." 
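Note: generateProfileCerts keys the cached apiserver cert on a short hash of the sorted alternate names and IPs, so the cert is only regenerated when the IP/name set changes. The following sketch reproduces just that hash computation, with made-up inputs.

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"
)

// certHash mirrors the hash input built in generateProfileCerts: alternate
// DNS names and IPs are sorted and joined, and the first 8 hex characters of
// the SHA-1 sum become the suffix of the cached apiserver cert/key.
func certHash(alternateNames, ips []string) string {
	hi := append([]string{}, alternateNames...)
	hi = append(hi, ips...)
	sort.Strings(hi)
	return fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8]
}

func main() {
	h := certHash(
		[]string{"kubernetes.default.svc.cluster.local", "control-plane.minikube.internal"},
		[]string{"192.168.39.2", "10.96.0.1"},
	)
	// The cached files end up as apiserver.crt.<hash> / apiserver.key.<hash>.
	fmt.Printf("apiserver.crt.%s\n", h)
}
```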
+ spec.hash + } + + if canRead(cp) && canRead(kp) { + glog.Infof("skipping %s signed cert generation: %s", spec.subject, kp) + continue + } + + glog.Infof("generating %s signed cert: %s", spec.subject, kp) + err := util.GenerateSignedCert( + cp, kp, spec.subject, + spec.ips, spec.alternateNames, + spec.caCertPath, spec.caKeyPath, + ) + if err != nil { + return xfer, errors.Wrapf(err, "generate signed cert for %q", spec.subject) + } + + if spec.hash != "" { + glog.Infof("copying %s -> %s", cp, spec.certPath) + if err := copy.Copy(cp, spec.certPath); err != nil { + return xfer, errors.Wrap(err, "copy cert") + } + glog.Infof("copying %s -> %s", kp, spec.keyPath) + if err := copy.Copy(kp, spec.keyPath); err != nil { + return xfer, errors.Wrap(err, "copy key") } } } - for _, signedCertSpec := range signedCertSpecs { - if err := util.GenerateSignedCert( - signedCertSpec.certPath, signedCertSpec.keyPath, signedCertSpec.subject, - signedCertSpec.ips, signedCertSpec.alternateNames, - signedCertSpec.caCertPath, signedCertSpec.caKeyPath, - ); err != nil { - return errors.Wrap(err, "Error generating signed apiserver serving cert") - } - } - - return nil + return xfer, nil } // isValidPEMCertificate checks whether the input file is a valid PEM certificate (with at least one CERTIFICATE block) @@ -270,19 +322,33 @@ func collectCACerts() (map[string]string, error) { if err != nil { return err } + if info == nil { + return nil + } + if info.IsDir() { + return nil + } - if info != nil && !info.IsDir() { - ext := strings.ToLower(filepath.Ext(hostpath)) - if ext == ".crt" || ext == ".pem" { - validPem, err := isValidPEMCertificate(hostpath) - if err != nil { - return err - } - if validPem { - filename := filepath.Base(hostpath) - dst := fmt.Sprintf("%s.%s", strings.TrimSuffix(filename, ext), "pem") - certFiles[hostpath] = path.Join(vmpath.GuestCertAuthDir, dst) - } + fullPath := filepath.Join(certsDir, hostpath) + ext := strings.ToLower(filepath.Ext(hostpath)) + + if ext == ".crt" || ext == ".pem" { + if info.Size() < 32 { + glog.Warningf("ignoring %s, impossibly tiny %d bytes", fullPath, info.Size()) + return nil + } + + glog.Infof("found cert: %s (%d bytes)", fullPath, info.Size()) + + validPem, err := isValidPEMCertificate(hostpath) + if err != nil { + return err + } + + if validPem { + filename := filepath.Base(hostpath) + dst := fmt.Sprintf("%s.%s", strings.TrimSuffix(filename, ext), "pem") + certFiles[hostpath] = path.Join(vmpath.GuestCertAuthDir, dst) } } return nil @@ -309,9 +375,16 @@ func collectCACerts() (map[string]string, error) { // getSubjectHash calculates Certificate Subject Hash for creating certificate symlinks func getSubjectHash(cr command.Runner, filePath string) (string, error) { + lrr, err := cr.RunCmd(exec.Command("ls", "-la", filePath)) + if err != nil { + return "", err + } + glog.Infof("hashing: %s", lrr.Stdout.String()) + rr, err := cr.RunCmd(exec.Command("openssl", "x509", "-hash", "-noout", "-in", filePath)) if err != nil { - return "", errors.Wrapf(err, rr.Command()) + crr, _ := cr.RunCmd(exec.Command("cat", filePath)) + return "", errors.Wrapf(err, "cert:\n%s\n---\n%s", lrr.Output(), crr.Stdout.String()) } stringHash := strings.TrimSpace(rr.Stdout.String()) return stringHash, nil @@ -333,31 +406,34 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { for _, caCertFile := range caCerts { dstFilename := path.Base(caCertFile) certStorePath := path.Join(vmpath.GuestCertStoreDir, dstFilename) - // If the cert really exists, add a named symlink - cmd := 
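Note: collectCACerts now ignores .crt/.pem files that are too small to be real certificates and logs what it finds before PEM validation. Below is a hedged sketch of that size-filtered walk; the 32-byte threshold comes from the hunk above, while the directory path is merely illustrative.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findCandidateCerts walks dir and returns .crt/.pem files that are at least
// minSize bytes, mirroring the size guard added to collectCACerts. Unlike the
// real code it simply tolerates unreadable entries rather than propagating
// the walk error for them.
func findCandidateCerts(dir string, minSize int64) ([]string, error) {
	var certs []string
	err := filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
		if err != nil || info == nil || info.IsDir() {
			return nil
		}
		ext := strings.ToLower(filepath.Ext(p))
		if ext != ".crt" && ext != ".pem" {
			return nil
		}
		if info.Size() < minSize {
			fmt.Printf("ignoring %s: only %d bytes\n", p, info.Size())
			return nil
		}
		certs = append(certs, p)
		return nil
	})
	return certs, err
}

func main() {
	certs, err := findCandidateCerts(filepath.Join(os.Getenv("HOME"), ".minikube", "certs"), 32)
	if err != nil {
		fmt.Println("walk error:", err)
	}
	fmt.Println("candidate CA certs:", certs)
}
```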
fmt.Sprintf("test -f %s && ln -fs %s %s", caCertFile, caCertFile, certStorePath) + + cmd := fmt.Sprintf("test -s %s && ln -fs %s %s", caCertFile, caCertFile, certStorePath) if _, err := cr.RunCmd(exec.Command("sudo", "/bin/bash", "-c", cmd)); err != nil { return errors.Wrapf(err, "create symlink for %s", caCertFile) } - if hasSSLBinary { - subjectHash, err := getSubjectHash(cr, caCertFile) - if err != nil { - return errors.Wrapf(err, "calculate hash for cacert %s", caCertFile) - } - subjectHashLink := path.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) - // NOTE: This symlink may exist, but point to a missing file - cmd := fmt.Sprintf("test -L %s || ln -fs %s %s", subjectHashLink, certStorePath, subjectHashLink) - if _, err := cr.RunCmd(exec.Command("sudo", "/bin/bash", "-c", cmd)); err != nil { - return errors.Wrapf(err, "create symlink for %s", caCertFile) - } + if !hasSSLBinary { + continue + } + + subjectHash, err := getSubjectHash(cr, caCertFile) + if err != nil { + return errors.Wrapf(err, "calculate hash for cacert %s", caCertFile) + } + subjectHashLink := path.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) + + // NOTE: This symlink may exist, but point to a missing file + cmd = fmt.Sprintf("test -L %s || ln -fs %s %s", subjectHashLink, certStorePath, subjectHashLink) + if _, err := cr.RunCmd(exec.Command("sudo", "/bin/bash", "-c", cmd)); err != nil { + return errors.Wrapf(err, "create symlink for %s", caCertFile) } } return nil } -// canReadFile returns true if the file represented +// canRead returns true if the file represented // by path exists and is readable, otherwise false. -func canReadFile(path string) bool { +func canRead(path string) bool { f, err := os.Open(path) if err != nil { return false diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index d92e174660..4f93aad180 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/tests" "k8s.io/minikube/pkg/util" ) @@ -52,26 +51,14 @@ func TestSetupCerts(t *testing.T) { } expected := map[string]string{ - `sudo /bin/bash -c "test -f /usr/share/ca-certificates/mycert.pem && ln -fs /usr/share/ca-certificates/mycert.pem /etc/ssl/certs/mycert.pem"`: "-", - `sudo /bin/bash -c "test -f /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"`: "-", + `sudo /bin/bash -c "test -s /usr/share/ca-certificates/mycert.pem && ln -fs /usr/share/ca-certificates/mycert.pem /etc/ssl/certs/mycert.pem"`: "-", + `sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"`: "-", } f := command.NewFakeCommandRunner() f.SetCommandToOutput(expected) - var filesToBeTransferred []string - for _, cert := range certs { - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), cert)) - } - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "ca.crt")) - filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "certs", "mycert.pem")) - - if err := SetupCerts(f, k8s, config.Node{}); err != nil { + _, err := SetupCerts(f, k8s, config.Node{}) + if err != nil { 
t.Fatalf("Error starting cluster: %v", err) } - for _, cert := range filesToBeTransferred { - _, err := f.GetFileToContents(cert) - if err != nil { - t.Errorf("Cert not generated: %s", cert) - } - } } diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index 1029e94007..4b619359b9 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -135,7 +135,7 @@ func dashboardFrontend(repo string) string { repo = "kubernetesui" } // See 'kubernetes-dashboard' in deploy/addons/dashboard/dashboard-dp.yaml - return path.Join(repo, "dashboard:v2.0.0-beta8") + return path.Join(repo, "dashboard:v2.0.0-rc6") } // dashboardMetrics returns the image used for the dashboard metrics scraper diff --git a/pkg/minikube/bootstrapper/images/images_test.go b/pkg/minikube/bootstrapper/images/images_test.go index 6a2edc5da2..e623a9002b 100644 --- a/pkg/minikube/bootstrapper/images/images_test.go +++ b/pkg/minikube/bootstrapper/images/images_test.go @@ -25,7 +25,7 @@ import ( func TestAuxiliary(t *testing.T) { want := []string{ "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", } got := auxiliary("") @@ -37,7 +37,7 @@ func TestAuxiliary(t *testing.T) { func TestAuxiliaryMirror(t *testing.T) { want := []string{ "test.mirror/storage-provisioner:v1.8.1", - "test.mirror/dashboard:v2.0.0-beta8", + "test.mirror/dashboard:v2.0.0-rc6", "test.mirror/metrics-scraper:v1.0.2", } got := auxiliary("test.mirror") diff --git a/pkg/minikube/bootstrapper/images/kubeadm_test.go b/pkg/minikube/bootstrapper/images/kubeadm_test.go index 1819254f23..d705b5e74c 100644 --- a/pkg/minikube/bootstrapper/images/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/images/kubeadm_test.go @@ -38,7 +38,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.4.3-0", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.16.1", "mirror.k8s.io", []string{ @@ -50,7 +50,7 @@ func TestKubeadmImages(t *testing.T) { "mirror.k8s.io/etcd:3.3.15-0", "mirror.k8s.io/pause:3.1", "mirror.k8s.io/storage-provisioner:v1.8.1", - "mirror.k8s.io/dashboard:v2.0.0-beta8", + "mirror.k8s.io/dashboard:v2.0.0-rc6", "mirror.k8s.io/metrics-scraper:v1.0.2", }}, {"v1.15.0", "", []string{ @@ -62,7 +62,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.14.0", "", []string{ @@ -74,7 +74,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.13.0", "", []string{ @@ -86,7 +86,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.12.0", "", []string{ @@ -98,7 +98,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd:3.2.24", "k8s.gcr.io/pause:3.1", 
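Note: the dashboard image bump from v2.0.0-beta8 to v2.0.0-rc6 flows through dashboardFrontend, which still falls back to the kubernetesui org when no mirror is configured. Here is a standalone copy of that helper, as shown in the hunk.

```go
package main

import (
	"fmt"
	"path"
)

// dashboardFrontend mirrors the updated helper: an empty repo falls back to
// the upstream "kubernetesui" org, and the tag is now v2.0.0-rc6.
func dashboardFrontend(repo string) string {
	if repo == "" {
		repo = "kubernetesui"
	}
	return path.Join(repo, "dashboard:v2.0.0-rc6")
}

func main() {
	fmt.Println(dashboardFrontend(""))            // kubernetesui/dashboard:v2.0.0-rc6
	fmt.Println(dashboardFrontend("test.mirror")) // test.mirror/dashboard:v2.0.0-rc6
}
```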
"gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, {"v1.11.10", "", []string{ @@ -110,7 +110,7 @@ func TestKubeadmImages(t *testing.T) { "k8s.gcr.io/etcd-amd64:3.2.18", "k8s.gcr.io/pause:3.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "kubernetesui/dashboard:v2.0.0-beta8", + "kubernetesui/dashboard:v2.0.0-rc6", "kubernetesui/metrics-scraper:v1.0.2", }}, } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 92f896e4ef..735032af12 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2020 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ import ( "context" "os/exec" "path" + "sync" "fmt" "net" @@ -39,7 +40,6 @@ import ( "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/drivers/kic" - "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/kapi" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -53,8 +53,10 @@ import ( "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/sysinit" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" "k8s.io/minikube/pkg/version" ) @@ -80,29 +82,9 @@ func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, n config.Node) return &Bootstrapper{c: runner, contextName: cc.Name, k8sClient: nil}, nil } -// GetKubeletStatus returns the kubelet status -func (k *Bootstrapper) GetKubeletStatus() (string, error) { - rr, err := k.c.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet")) - if err != nil { - // Do not return now, as we still have parsing to do! - glog.Warningf("%s returned error: %v", rr.Command(), err) - } - s := strings.TrimSpace(rr.Stdout.String()) - glog.Infof("kubelet is-active: %s", s) - switch s { - case "active": - return state.Running.String(), nil - case "inactive": - return state.Stopped.String(), nil - case "activating": - return state.Starting.String(), nil - } - return state.Error.String(), nil -} - // GetAPIServerStatus returns the api-server status -func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) { - s, err := kverify.APIServerStatus(k.c, ip, port) +func (k *Bootstrapper) GetAPIServerStatus(hostname string, port int) (string, error) { + s, err := kverify.APIServerStatus(k.c, hostname, port) if err != nil { return state.Error.String(), err } @@ -110,7 +92,7 @@ func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) { } // LogCommands returns a map of log type to a command which will display that log. 
-func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string { +func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string { var kubelet strings.Builder kubelet.WriteString("sudo journalctl -u kubelet") if o.Lines > 0 { @@ -128,9 +110,15 @@ func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string if o.Lines > 0 { dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } + + describeNodes := fmt.Sprintf("sudo %s describe nodes --kubeconfig=%s", + path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), + path.Join(vmpath.GuestPersistentDir, "kubeconfig")) + return map[string]string{ - "kubelet": kubelet.String(), - "dmesg": dmesg.String(), + "kubelet": kubelet.String(), + "dmesg": dmesg.String(), + "describe nodes": describeNodes, } } @@ -151,27 +139,38 @@ func (k *Bootstrapper) createCompatSymlinks() error { return nil } -// StartCluster starts the cluster -func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { - err := bsutil.ExistingConfig(k.c) - if err == nil { // if there is an existing cluster don't reconfigure it - return k.restartCluster(cfg) +// clearStaleConfigs clears configurations which may have stale IP addresses +func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { + cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return err } - glog.Infof("existence check: %v", err) - start := time.Now() - glog.Infof("StartCluster: %+v", cfg) - defer func() { - glog.Infof("StartCluster complete in %s", time.Since(start)) - }() + paths := []string{ + "/etc/kubernetes/admin.conf", + "/etc/kubernetes/kubelet.conf", + "/etc/kubernetes/controller-manager.conf", + "/etc/kubernetes/scheduler.conf", + } + endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(cp.IP, strconv.Itoa(cp.Port))) + for _, path := range paths { + _, err := k.c.RunCmd(exec.Command("sudo", "/bin/bash", "-c", fmt.Sprintf("grep %s %s || sudo rm -f %s", endpoint, path, path))) + if err != nil { + return err + } + } + return nil +} + +func (k *Bootstrapper) init(cfg config.ClusterConfig) error { version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") } extraFlags := bsutil.CreateFlagsFromExtraArgs(cfg.KubernetesConfig.ExtraOptions) - r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime}) + r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) if err != nil { return err } @@ -202,33 +201,119 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ","))) - rr, err := k.c.RunCmd(c) - if err != nil { - return errors.Wrapf(err, "init failed. 
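Note: clearStaleConfigs guards against a control-plane IP change by removing any kubeadm-managed kubeconfig under /etc/kubernetes that does not mention the current endpoint. The sketch below rebuilds those shell commands for an example IP and port.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// staleConfigCmds builds the same "keep it if it already references the
// current endpoint, otherwise remove it" commands that clearStaleConfigs
// runs over the kubeadm-managed kubeconfig files.
func staleConfigCmds(ip string, port int) []string {
	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(ip, strconv.Itoa(port)))
	paths := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	cmds := make([]string, 0, len(paths))
	for _, p := range paths {
		cmds = append(cmds, fmt.Sprintf("grep %s %s || sudo rm -f %s", endpoint, p, p))
	}
	return cmds
}

func main() {
	for _, c := range staleConfigCmds("192.168.39.2", 8443) {
		fmt.Println(c)
	}
}
```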
output: %q", rr.Output()) + if err := k.clearStaleConfigs(cfg); err != nil { + return errors.Wrap(err, "clearing stale configs") } - if cfg.Driver == driver.Docker { - if err := k.applyKicOverlay(cfg); err != nil { - return errors.Wrap(err, "apply kic overlay") + conf := bsutil.KubeadmYamlPath + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", + bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) + if _, err := k.c.RunCmd(c); err != nil { + return errors.Wrap(err, "run") + } + + var wg sync.WaitGroup + wg.Add(4) + + go func() { + // the overlay is required for containerd and cri-o runtime: see #7428 + if driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker" { + if err := k.applyKicOverlay(cfg); err != nil { + glog.Errorf("failed to apply kic overlay: %v", err) + } + } + wg.Done() + }() + + go func() { + if err := k.applyNodeLabels(cfg); err != nil { + glog.Warningf("unable to apply node labels: %v", err) + } + wg.Done() + }() + + go func() { + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) + } + wg.Done() + }() + + go func() { + if err := k.elevateKubeSystemPrivileges(cfg); err != nil { + glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err) + } + wg.Done() + }() + + wg.Wait() + return nil +} + +// unpause unpauses any Kubernetes backplane components +func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error { + + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}}) + if err != nil { + return errors.Wrap(err, "list paused") + } + + if len(ids) > 0 { + if err := cr.UnpauseContainers(ids); err != nil { + return err } } - - if err := k.applyNodeLabels(cfg); err != nil { - glog.Warningf("unable to apply node labels: %v", err) - } - - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } - - if err := k.elevateKubeSystemPrivileges(cfg); err != nil { - glog.Warningf("unable to create cluster role binding, some addons might not work : %v. 
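Note: after kubeadm init succeeds, the post-init chores (kic overlay, node labels, resource limits, RBAC elevation) are now fanned out with a sync.WaitGroup instead of running serially, and failures only log warnings. Here is a generic sketch of that pattern with placeholder step functions.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// runConcurrently fans independent post-init steps out to goroutines and
// waits for all of them, the same shape the kubeadm bootstrapper now uses;
// a failing step prints a warning instead of aborting, matching the diff.
func runConcurrently(steps map[string]func() error) {
	var wg sync.WaitGroup
	wg.Add(len(steps))
	for name, step := range steps {
		go func(name string, step func() error) {
			defer wg.Done()
			if err := step(); err != nil {
				fmt.Printf("warning: %s failed: %v\n", name, err)
			}
		}(name, step)
	}
	wg.Wait()
}

func main() {
	runConcurrently(map[string]func() error{
		"apply node labels":      func() error { time.Sleep(10 * time.Millisecond); return nil },
		"adjust resource limits": func() error { return nil },
		"elevate kube-system":    func() error { return fmt.Errorf("simulated failure") },
	})
	fmt.Println("post-init steps finished")
}
```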
", err) - } - return nil } +// StartCluster starts the cluster +func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { + start := time.Now() + glog.Infof("StartCluster: %+v", cfg) + defer func() { + glog.Infof("StartCluster complete in %s", time.Since(start)) + }() + + // Before we start, ensure that no paused components are lurking around + if err := k.unpause(cfg); err != nil { + glog.Warningf("unpause failed: %v", err) + } + + if err := bsutil.ExistingConfig(k.c); err == nil { + glog.Infof("found existing configuration files, will attempt cluster restart") + rerr := k.restartCluster(cfg) + if rerr == nil { + return nil + } + out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + // Fall-through to init + } + + conf := bsutil.KubeadmYamlPath + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") + } + + err := k.init(cfg) + if err == nil { + return nil + } + + out.ErrT(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + return k.init(cfg) +} + // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) { if k.k8sClient != nil { @@ -252,42 +337,108 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error return c, err } -// WaitForCluster blocks until the cluster appears to be healthy -func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error { +// WaitForNode blocks until the node appears to be healthy +func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error { start := time.Now() - out.T(out.Waiting, "Waiting for cluster to come online ...") - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return err + + if !n.ControlPlane { + glog.Infof("%s is not a control plane, nothing to wait for", n.Name) + return nil } - if err := kverify.APIServerProcess(k.c, start, timeout); err != nil { - return err + if !kverify.ShouldWait(cfg.VerifyComponents) { + glog.Infof("skip waiting for components based on config.") + return nil } - ip := cp.IP - port := cp.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, port) + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime) + } + + hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver) + if err != nil { + return errors.Wrap(err, "get control plane endpoint") + } + + if cfg.VerifyComponents[kverify.APIServerWaitKey] { + client, err := k.client(hostname, port) if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil { + return errors.Wrap(err, "wait for apiserver proc") + } + + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, hostname, port, timeout); err != nil { + return errors.Wrap(err, "wait for 
healthy API server") } } - if err := kverify.APIServerIsRunning(start, ip, port, timeout); err != nil { - return err + + if cfg.VerifyComponents[kverify.SystemPodsWaitKey] { + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil { + return errors.Wrap(err, "waiting for system pods") + } } - c, err := k.client(ip, port) - if err != nil { - return errors.Wrap(err, "get k8s client") + if cfg.VerifyComponents[kverify.DefaultSAWaitKey] { + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForDefaultSA(client, timeout); err != nil { + return errors.Wrap(err, "waiting for default service account") + } } - if err := kverify.SystemPods(c, start, timeout); err != nil { - return errors.Wrap(err, "waiting for system pods") + if cfg.VerifyComponents[kverify.AppsRunning] { + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "get k8s client") + } + if err := kverify.WaitForAppsRunning(client, kverify.AppsRunningList, timeout); err != nil { + return errors.Wrap(err, "waiting for apps_running") + } } + + glog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents) return nil } +// needsReset returns whether or not the cluster needs to be reconfigured +func (k *Bootstrapper) needsReset(conf string, hostname string, port int, client *kubernetes.Clientset, version string) bool { + if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { + glog.Infof("needs reset: configs differ:\n%s", rr.Output()) + return true + } + + st, err := kverify.APIServerStatus(k.c, hostname, port) + if err != nil { + glog.Infof("needs reset: apiserver error: %v", err) + return true + } + + if st != state.Running { + glog.Infof("needs reset: apiserver in state %s", st) + return true + } + + if err := kverify.ExpectAppsRunning(client, kverify.AppsRunningList); err != nil { + glog.Infof("needs reset: %v", err) + return true + } + + if err := kverify.APIServerVersionMatch(client, version); err != nil { + glog.Infof("needs reset: %v", err) + return true + } + + return false +} + // restartCluster restarts the Kubernetes cluster configured by kubeadm func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("restartCluster start") @@ -313,58 +464,124 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Errorf("failed to create compat symlinks: %v", err) } - baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) - cmds := []string{ - fmt.Sprintf("%s phase certs all --config %s", baseCmd, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, bsutil.KubeadmYamlPath), - fmt.Sprintf("%s phase etcd local --config %s", baseCmd, bsutil.KubeadmYamlPath), + cp, err := config.PrimaryControlPlane(&cfg) + if err != nil { + return errors.Wrap(err, "primary control plane") } + hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &cp, cfg.Driver) + if err != nil { + return errors.Wrap(err, "control plane") + } + + client, err := k.client(hostname, port) + if err != nil { + return errors.Wrap(err, "getting k8s client") + } + + // If the cluster is running, check if we have any work to do. 
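Note: WaitForNode replaces the old fixed wait sequence with checks gated by cfg.VerifyComponents, so callers can opt out of individual health checks. The sketch below uses a plain map of named checks; the key names are illustrative stand-ins for the kverify constants (APIServerWaitKey, SystemPodsWaitKey, and so on).

```go
package main

import "fmt"

// waitChecks sketches how WaitForNode gates its health checks on the
// VerifyComponents map: only enabled checks run, and the first failure
// aborts with a wrapped error.
func waitChecks(verify map[string]bool, checks map[string]func() error) error {
	for name, check := range checks {
		if !verify[name] {
			fmt.Println("skipping check:", name)
			continue
		}
		if err := check(); err != nil {
			return fmt.Errorf("wait for %s: %w", name, err)
		}
		fmt.Println("passed check:", name)
	}
	return nil
}

func main() {
	verify := map[string]bool{"apiserver": true, "system_pods": true, "default_sa": false}
	err := waitChecks(verify, map[string]func() error{
		"apiserver":   func() error { return nil },
		"system_pods": func() error { return nil },
		"default_sa":  func() error { return nil },
	})
	fmt.Println("wait result:", err)
}
```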
+ conf := bsutil.KubeadmYamlPath + if !k.needsReset(conf, hostname, port, client, cfg.KubernetesConfig.KubernetesVersion) { + glog.Infof("Taking a shortcut, as the cluster seems to be properly configured") + return nil + } + + if err := k.clearStaleConfigs(cfg); err != nil { + return errors.Wrap(err, "clearing stale configs") + } + + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") + } + + baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) + cmds := []string{ + fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf), + fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf), + fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, conf), + fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf), + } + + glog.Infof("resetting cluster from %s", conf) // Run commands one at a time so that it is easier to root cause failures. for _, c := range cmds { - rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) if err != nil { - return errors.Wrapf(err, "running cmd: %s", rr.Command()) + return errors.Wrap(err, "run") } } + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return errors.Wrap(err, "runtime") + } + // We must ensure that the apiserver is healthy before proceeding - if err := kverify.APIServerProcess(k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver healthz") } - for _, n := range cfg.Nodes { - ip := n.IP - port := n.Port - if driver.IsKIC(cfg.Driver) { - ip = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(cfg.Driver, cfg.Name, port) - if err != nil { - return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name) - } - } - client, err := k.client(ip, port) - if err != nil { - return errors.Wrap(err, "getting k8s client") - } + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "apiserver health") + } - if err := kverify.SystemPods(client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { - return errors.Wrap(err, "system pods") - } + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "system pods") + } - // Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes. 
- if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) - } + // This can fail during upgrades if the old pods have not shut down yet + addonPhase := func() error { + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))) + return err + } + if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil { + glog.Warningf("addon install failed, wil retry: %v", err) + return errors.Wrap(err, "addons") + } - if err := bsutil.AdjustResourceLimits(k.c); err != nil { - glog.Warningf("unable to adjust resource limits: %v", err) - } + if err := bsutil.AdjustResourceLimits(k.c); err != nil { + glog.Warningf("unable to adjust resource limits: %v", err) } return nil } +// JoinCluster adds a node to an existing cluster +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { + start := time.Now() + glog.Infof("JoinCluster: %+v", cc) + defer func() { + glog.Infof("JoinCluster complete in %s", time.Since(start)) + }() + + // Join the master by specifying its token + joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, driver.MachineName(cc, n)) + out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) + if err != nil { + return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) + } + + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { + return errors.Wrap(err, "starting kubelet") + } + + return nil +} + +// GenerateToken creates a token and returns the appropriate kubeadm join command to run +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + r, err := k.c.RunCmd(tokenCmd) + if err != nil { + return "", errors.Wrap(err, "generating bootstrap token") + } + + joinCmd := r.Stdout.String() + joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + + return joinCmd, nil +} + // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -377,44 +594,86 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion)) } - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil { - return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command()) + rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) + if derr != nil { + glog.Warningf("%s: %v", rr.Command(), err) } - return nil + if err := sysinit.New(k.c).ForceStop("kubelet"); err != nil { + glog.Warningf("stop kubelet: %v", err) + } + + cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket}) + if err != nil { + return errors.Wrap(err, "runtime") + } + + containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}}) + if err != nil { + glog.Warningf("unable to list 
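Note: GenerateToken shells out to kubeadm token create --print-join-command, then rewrites the result to use the versioned kubeadm binary minikube ships and to ignore preflight errors. The string handling, isolated from the command runner:

```go
package main

import (
	"fmt"
	"strings"
)

// rewriteJoinCmd mirrors what GenerateToken does with kubeadm's output: the
// bare "kubeadm" is replaced with the versioned binary path, and preflight
// errors are ignored so joins survive minor environment drift.
func rewriteJoinCmd(raw, kubeadmPath string) string {
	cmd := strings.Replace(raw, "kubeadm", kubeadmPath, 1)
	return fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(cmd))
}

func main() {
	// Example output only; the token and hash are placeholders.
	raw := "kubeadm join 192.168.39.2:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:1234\n"
	fmt.Println(rewriteJoinCmd(raw, "/var/lib/minikube/binaries/v1.18.0/kubeadm"))
}
```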
kube-system containers: %v", err) + } + if len(containers) > 0 { + glog.Warningf("found %d kube-system containers to stop", len(containers)) + if err := cr.StopContainers(containers); err != nil { + glog.Warningf("error stopping containers: %v", err) + } + } + + return derr } // SetupCerts sets up certificates within the cluster. func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error { - return bootstrapper.SetupCerts(k.c, k8s, n) + _, err := bootstrapper.SetupCerts(k.c, k8s, n) + return err } -// UpdateCluster updates the cluster +// UpdateCluster updates the cluster. func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "kubeadm images") } + r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, + Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket}) + if err != nil { + return errors.Wrap(err, "runtime") + } + + if err := r.Preload(cfg.KubernetesConfig); err != nil { + glog.Infof("prelaoding failed, will try to load cached images: %v", err) + } + if cfg.KubernetesConfig.ShouldLoadCachedImages { if err := machine.LoadImages(&cfg, k.c, images, constants.ImageCacheDir); err != nil { out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err}) } } - r, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, - Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket}) - if err != nil { - return errors.Wrap(err, "runtime") + + for _, n := range cfg.Nodes { + err := k.UpdateNode(cfg, n, r) + if err != nil { + return errors.Wrap(err, "updating node") + } } - // TODO: multiple nodes - kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, r, cfg.Nodes[0]) + return nil +} + +// UpdateNode updates a node. +func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error { + now := time.Now() + defer func() { + glog.Infof("reloadKubelet took %s", time.Since(now)) + }() + + kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") } - // TODO: multiple nodes - kubeletCfg, err := bsutil.NewKubeletConfig(cfg, cfg.Nodes[0], r) + kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubelet config") } @@ -426,35 +685,40 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig) - // stop kubelet to avoid "Text File Busy" error - if err := stopKubelet(k.c); err != nil { - glog.Warningf("unable to stop kubelet: %s", err) - } + sm := sysinit.New(k.c) - if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil { + if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c, sm); err != nil { return errors.Wrap(err, "downloading binaries") } - var cniFile []byte + files := []assets.CopyableFile{ + assets.NewMemoryAssetTarget(kubeadmCfg, bsutil.KubeadmYamlPath+".new", "0640"), + assets.NewMemoryAssetTarget(kubeletCfg, bsutil.KubeletSystemdConfFile+".new", "0644"), + assets.NewMemoryAssetTarget(kubeletService, bsutil.KubeletServiceFile+".new", "0644"), + } + // Copy the default CNI config (k8s.conf), so that kubelet can successfully + // start a Pod in the case a user hasn't manually installed any CNI plugin + // and minikube was started with "--extra-config=kubelet.network-plugin=cni". 
if cfg.KubernetesConfig.EnableDefaultCNI { - cniFile = []byte(defaultCNIConfig) + files = append(files, assets.NewMemoryAssetTarget([]byte(defaultCNIConfig), bsutil.DefaultCNIConfigPath, "0644")) } - files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile) + + // Installs compatibility shims for non-systemd environments + kubeletPath := path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl") + shims, err := sm.GenerateInitShim("kubelet", kubeletPath, bsutil.KubeletSystemdConfFile) + if err != nil { + return errors.Wrap(err, "shim") + } + files = append(files, shims...) + if err := copyFiles(k.c, files); err != nil { - return err + return errors.Wrap(err, "copy") } - if err := startKubelet(k.c); err != nil { - return err + if err := startKubeletIfRequired(k.c, sm); err != nil { + return errors.Wrap(err, "reload") } - return nil -} -func stopKubelet(runner command.Runner) error { - stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet") - if rr, err := runner.RunCmd(stopCmd); err != nil { - return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output()) - } return nil } @@ -477,30 +741,55 @@ func copyFiles(runner command.Runner, files []assets.CopyableFile) error { return nil } -func startKubelet(runner command.Runner) error { - startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet") +func startKubeletIfRequired(runner command.Runner, sm sysinit.Manager) error { + now := time.Now() + defer func() { + glog.Infof("reloadKubelet took %s", time.Since(now)) + }() + + svc := bsutil.KubeletServiceFile + conf := bsutil.KubeletSystemdConfFile + + checkCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("pgrep kubelet && diff -u %s %s.new && diff -u %s %s.new", svc, svc, conf, conf)) + if _, err := runner.RunCmd(checkCmd); err == nil { + glog.Infof("kubelet is already running with the right configs") + return nil + } + + startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cp %s.new %s && sudo cp %s.new %s", svc, svc, conf, conf)) if _, err := runner.RunCmd(startCmd); err != nil { return errors.Wrap(err, "starting kubelet") } - return nil + + return sm.Start("kubelet") } // applyKicOverlay applies the CNI plugin needed to make kic work func (k *Bootstrapper) applyKicOverlay(cfg config.ClusterConfig) error { - // Allow no more than 5 seconds for apply kic overlay - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + cmd := exec.CommandContext(ctx, "sudo", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), "create", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", "-") + b := bytes.Buffer{} if err := kicCNIConfig.Execute(&b, struct{ ImageName string }{ImageName: kic.OverlayImage}); err != nil { return err } + cmd.Stdin = bytes.NewReader(b.Bytes()) if rr, err := k.c.RunCmd(cmd); err != nil { return errors.Wrapf(err, "cmd: %s output: %s", rr.Command(), rr.Output()) } + + // Inform cri-o that the CNI has changed + if cfg.KubernetesConfig.ContainerRuntime == "crio" { + if err := sysinit.New(k.c).Restart("crio"); err != nil { + return errors.Wrap(err, "restart crio") + } + } + return nil } @@ -517,7 +806,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // example: - // sudo /var/lib/minikube/binaries/v1.17.3/kubectl label nodes minikube.k8s.io/version=v1.7.3 minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig + // sudo /var/lib/minikube/binaries//kubectl label nodes minikube.k8s.io/version= minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig cmd := exec.CommandContext(ctx, "sudo", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), "label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, "--all", "--overwrite", diff --git a/pkg/minikube/browser/browser.go b/pkg/minikube/browser/browser.go new file mode 100644 index 0000000000..890e068844 --- /dev/null +++ b/pkg/minikube/browser/browser.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package browser + +import ( + "os/exec" + "runtime" + + "github.com/pkg/browser" + "k8s.io/minikube/pkg/minikube/out" +) + +// OpenURL opens a new browser window pointing to URL. 
+func OpenURL(url string) error { + if runtime.GOOS == "linux" { + _, err := exec.LookPath("xdg-open") + if err != nil { + out.T(out.URL, url) + return nil + } + } + return browser.OpenURL(url) +} diff --git a/pkg/minikube/cluster/pause.go b/pkg/minikube/cluster/pause.go index 2f98cf6de3..09b1a10c24 100644 --- a/pkg/minikube/cluster/pause.go +++ b/pkg/minikube/cluster/pause.go @@ -21,35 +21,33 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/kubelet" + "k8s.io/minikube/pkg/minikube/sysinit" ) -// DefaultNamespaces are namespaces used by minikube, including addons -var DefaultNamespaces = []string{ - "kube-system", - "kubernetes-dashboard", - "storage-gluster", - "istio-operator", -} - // Pause pauses a Kubernetes cluster func Pause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string, error) { ids := []string{} + // Disable the kubelet so it does not attempt to restart paused pods - if err := kubelet.Disable(r); err != nil { + sm := sysinit.New(r) + if err := sm.Disable("kubelet"); err != nil { return ids, errors.Wrap(err, "kubelet disable") } - if err := kubelet.Stop(r); err != nil { + + if err := sm.Stop("kubelet"); err != nil { return ids, errors.Wrap(err, "kubelet stop") } + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Running, Namespaces: namespaces}) if err != nil { return ids, errors.Wrap(err, "list running") } + if len(ids) == 0 { glog.Warningf("no running containers to pause") return ids, nil } + return ids, cr.PauseContainers(ids) } @@ -67,11 +65,14 @@ func Unpause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]stri return ids, errors.Wrap(err, "unpause") } - if err := kubelet.Enable(r); err != nil { + sm := sysinit.New(r) + if err := sm.Enable("kubelet"); err != nil { return ids, errors.Wrap(err, "kubelet enable") } - if err := kubelet.Start(r); err != nil { + + if err := sm.Start("kubelet"); err != nil { return ids, errors.Wrap(err, "kubelet start") } + return ids, nil } diff --git a/pkg/minikube/command/command_runner.go b/pkg/minikube/command/command_runner.go index dfd30c5233..06be22eb36 100644 --- a/pkg/minikube/command/command_runner.go +++ b/pkg/minikube/command/command_runner.go @@ -17,12 +17,17 @@ limitations under the License. package command import ( + "bufio" "bytes" "fmt" + "io" + "os" "os/exec" - "path" + "strconv" "strings" + "time" + "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/assets" ) @@ -55,10 +60,6 @@ type Runner interface { Remove(assets.CopyableFile) error } -func getDeleteFileCommand(f assets.CopyableFile) string { - return fmt.Sprintf("sudo rm %s", path.Join(f.GetTargetDir(), f.GetTargetName())) -} - // Command returns a human readable command string that does not induce eye fatigue func (rr RunResult) Command() string { var sb strings.Builder @@ -84,3 +85,101 @@ func (rr RunResult) Output() string { } return sb.String() } + +// teePrefix copies bytes from a reader to writer, logging each new line. 
+func teePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanBytes) + var line bytes.Buffer + + for scanner.Scan() { + b := scanner.Bytes() + if _, err := w.Write(b); err != nil { + return err + } + if bytes.IndexAny(b, "\r\n") == 0 { + if line.Len() > 0 { + logger("%s%s", prefix, line.String()) + line.Reset() + } + continue + } + line.Write(b) + } + // Catch trailing output in case stream does not end with a newline + if line.Len() > 0 { + logger("%s%s", prefix, line.String()) + } + return nil +} + +// fileExists checks that the same file exists on the other end +func fileExists(r Runner, f assets.CopyableFile, dst string) (bool, error) { + // It's too difficult to tell if the file exists with the exact contents + if f.GetSourcePath() == assets.MemorySource { + return false, nil + } + + // get file size and modtime of the source + srcSize := f.GetLength() + srcModTime, err := f.GetModTime() + if err != nil { + return false, err + } + if srcModTime.IsZero() { + return false, nil + } + + // get file size and modtime of the destination + rr, err := r.RunCmd(exec.Command("stat", "-c", "%s %y", dst)) + if err != nil { + if rr.ExitCode == 1 { + return false, nil + } + + // avoid the noise because ssh doesn't propagate the exit code + if strings.HasSuffix(err.Error(), "status 1") { + return false, nil + } + + return false, err + } + + stdout := strings.TrimSpace(rr.Stdout.String()) + outputs := strings.SplitN(stdout, " ", 2) + dstSize, err := strconv.Atoi(outputs[0]) + if err != nil { + return false, err + } + + dstModTime, err := time.Parse(layout, outputs[1]) + if err != nil { + return false, err + } + + if srcSize != dstSize { + return false, errors.New("source file and destination file are different sizes") + } + + return srcModTime.Equal(dstModTime), nil +} + +// writeFile is like ioutil.WriteFile, but does not require reading file into memory +func writeFile(dst string, f assets.CopyableFile, perms os.FileMode) error { + w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perms) + if err != nil { + return errors.Wrap(err, "create") + } + defer w.Close() + + r := f.(io.Reader) + n, err := io.Copy(w, r) + if err != nil { + return errors.Wrap(err, "copy") + } + + if n != int64(f.GetLength()) { + return fmt.Errorf("%s: expected to write %d bytes, but wrote %d instead", dst, f.GetLength(), n) + } + return w.Close() +} diff --git a/pkg/minikube/command/exec_runner.go b/pkg/minikube/command/exec_runner.go index 5dc5b81a64..43b49c59f2 100644 --- a/pkg/minikube/command/exec_runner.go +++ b/pkg/minikube/command/exec_runner.go @@ -46,6 +46,8 @@ func NewExecRunner() Runner { // RunCmd implements the Command Runner interface to run a exec.Cmd object func (*execRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { rr := &RunResult{Args: cmd.Args} + glog.Infof("Run: %v", rr.Command()) + var outb, errb io.Writer if cmd.Stdout == nil { var so bytes.Buffer @@ -84,35 +86,31 @@ func (*execRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { // Copy copies a file and its permissions func (*execRunner) Copy(f assets.CopyableFile) error { - targetPath := path.Join(f.GetTargetDir(), f.GetTargetName()) - if _, err := os.Stat(targetPath); err == nil { - if err := os.Remove(targetPath); err != nil { - return errors.Wrapf(err, "error removing file %s", targetPath) + dst := path.Join(f.GetTargetDir(), f.GetTargetName()) + if _, err := os.Stat(dst); err == nil { + glog.Infof("found %s, removing ...", dst) + if 
err := os.Remove(dst); err != nil { + return errors.Wrapf(err, "error removing file %s", dst) } + } + src := f.GetSourcePath() + glog.Infof("cp: %s --> %s (%d bytes)", src, dst, f.GetLength()) + if f.GetLength() == 0 { + glog.Warningf("0 byte asset: %+v", f) } - target, err := os.Create(targetPath) - if err != nil { - return errors.Wrapf(err, "error creating file at %s", targetPath) - } + perms, err := strconv.ParseInt(f.GetPermissions(), 8, 0) if err != nil { return errors.Wrapf(err, "error converting permissions %s to integer", f.GetPermissions()) } - if err := os.Chmod(targetPath, os.FileMode(perms)); err != nil { - return errors.Wrapf(err, "error changing file permissions for %s", targetPath) - } - if _, err = io.Copy(target, f); err != nil { - return errors.Wrapf(err, `error copying file %s to target location: -do you have the correct permissions?`, - targetPath) - } - return target.Close() + return writeFile(dst, f, os.FileMode(perms)) } // Remove removes a file func (*execRunner) Remove(f assets.CopyableFile) error { - targetPath := filepath.Join(f.GetTargetDir(), f.GetTargetName()) - return os.Remove(targetPath) + dst := filepath.Join(f.GetTargetDir(), f.GetTargetName()) + glog.Infof("rm: %s", dst) + return os.Remove(dst) } diff --git a/pkg/minikube/command/fake_runner.go b/pkg/minikube/command/fake_runner.go index 82a6d833df..9da8377a26 100644 --- a/pkg/minikube/command/fake_runner.go +++ b/pkg/minikube/command/fake_runner.go @@ -97,13 +97,13 @@ func (f *FakeCommandRunner) Copy(file assets.CopyableFile) error { if err != nil { return errors.Wrapf(err, "error reading file: %+v", file) } - f.fileMap.Store(file.GetAssetName(), b.String()) + f.fileMap.Store(file.GetSourcePath(), b.String()) return nil } // Remove removes the filename, file contents key value pair from the stored map func (f *FakeCommandRunner) Remove(file assets.CopyableFile) error { - f.fileMap.Delete(file.GetAssetName()) + f.fileMap.Delete(file.GetSourcePath()) return nil } diff --git a/pkg/minikube/command/kic_runner.go b/pkg/minikube/command/kic_runner.go index 2d00a15251..018fe3bfae 100644 --- a/pkg/minikube/command/kic_runner.go +++ b/pkg/minikube/command/kic_runner.go @@ -88,6 +88,7 @@ func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { oc.Env = cmd.Env rr := &RunResult{Args: cmd.Args} + glog.Infof("Run: %v", rr.Command()) var outb, errb io.Writer if oc.Stdout == nil { @@ -127,44 +128,73 @@ func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { // Copy copies a file and its permissions func (k *kicRunner) Copy(f assets.CopyableFile) error { - src := f.GetAssetName() - if _, err := os.Stat(f.GetAssetName()); os.IsNotExist(err) { - fc := make([]byte, f.GetLength()) // Read asset file into a []byte - if _, err := f.Read(fc); err != nil { - return errors.Wrap(err, "can't copy non-existing file") - } // we have a MemoryAsset, will write to disk before copying + dst := path.Join(path.Join(f.GetTargetDir(), f.GetTargetName())) - tmpFile, err := ioutil.TempFile(os.TempDir(), "tmpf-memory-asset") + // For tiny files, it's cheaper to overwrite than check + if f.GetLength() > 4096 { + exists, err := fileExists(k, f, dst) if err != nil { - return errors.Wrap(err, "creating temporary file") + glog.Infof("existence error for %s: %v", dst, err) } - // clean up the temp file - defer os.Remove(tmpFile.Name()) - if _, err = tmpFile.Write(fc); err != nil { - return errors.Wrap(err, "write to temporary file") + if exists { + glog.Infof("copy: skipping %s (exists)", dst) + return nil } + } - // Close the file - 
if err := tmpFile.Close(); err != nil { - return errors.Wrap(err, "close temporary file") - } - src = tmpFile.Name() + src := f.GetSourcePath() + if f.GetLength() == 0 { + glog.Warningf("0 byte asset: %+v", f) } perms, err := strconv.ParseInt(f.GetPermissions(), 8, 0) if err != nil { - return errors.Wrapf(err, "converting permissions %s to integer", f.GetPermissions()) + return errors.Wrapf(err, "error converting permissions %s to integer", f.GetPermissions()) } - // Rely on cp -a to propagate permissions - if err := os.Chmod(src, os.FileMode(perms)); err != nil { - return errors.Wrapf(err, "chmod") + if src != assets.MemorySource { + // Take the fast path + fi, err := os.Stat(src) + if err == nil { + if fi.Mode() == os.FileMode(perms) { + glog.Infof("%s (direct): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength()) + return k.copy(src, dst) + } + + // If >1MB, avoid local copy + if fi.Size() > (1024 * 1024) { + glog.Infof("%s (chmod): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength()) + if err := k.copy(src, dst); err != nil { + return err + } + return k.chmod(dst, f.GetPermissions()) + } + } } - dest := fmt.Sprintf("%s:%s", k.nameOrID, path.Join(f.GetTargetDir(), f.GetTargetName())) + glog.Infof("%s (temp): %s --> %s (%d bytes)", k.ociBin, src, dst, f.GetLength()) + tf, err := ioutil.TempFile("", "tmpf-memory-asset") + if err != nil { + return errors.Wrap(err, "creating temporary file") + } + defer os.Remove(tf.Name()) + + if err := writeFile(tf.Name(), f, os.FileMode(perms)); err != nil { + return errors.Wrap(err, "write") + } + return k.copy(tf.Name(), dst) +} + +func (k *kicRunner) copy(src string, dst string) error { + fullDest := fmt.Sprintf("%s:%s", k.nameOrID, dst) if k.ociBin == oci.Podman { - return copyToPodman(src, dest) + return copyToPodman(src, fullDest) } - return copyToDocker(src, dest) + return copyToDocker(src, fullDest) +} + +func (k *kicRunner) chmod(dst string, perm string) error { + _, err := k.RunCmd(exec.Command("sudo", "chmod", perm, dst)) + return err } // Podman cp command doesn't match docker and doesn't have -a @@ -184,11 +214,11 @@ func copyToDocker(src string, dest string) error { // Remove removes a file func (k *kicRunner) Remove(f assets.CopyableFile) error { - fp := path.Join(f.GetTargetDir(), f.GetTargetName()) - if rr, err := k.RunCmd(exec.Command("sudo", "rm", fp)); err != nil { - return errors.Wrapf(err, "removing file %q output: %s", fp, rr.Output()) - } - return nil + dst := path.Join(f.GetTargetDir(), f.GetTargetName()) + glog.Infof("rm: %s", dst) + + _, err := k.RunCmd(exec.Command("sudo", "rm", dst)) + return err } // isTerminal returns true if the writer w is a terminal diff --git a/pkg/minikube/command/ssh_runner.go b/pkg/minikube/command/ssh_runner.go index 9d1f03a04d..ce3cc58522 100644 --- a/pkg/minikube/command/ssh_runner.go +++ b/pkg/minikube/command/ssh_runner.go @@ -17,14 +17,11 @@ limitations under the License. package command import ( - "bufio" "bytes" "fmt" "io" "os/exec" "path" - "strconv" - "strings" "sync" "time" @@ -55,13 +52,16 @@ func NewSSHRunner(c *ssh.Client) *SSHRunner { // Remove runs a command to delete a file on the remote. 
func (s *SSHRunner) Remove(f assets.CopyableFile) error { + dst := path.Join(f.GetTargetDir(), f.GetTargetName()) + glog.Infof("rm: %s", dst) + sess, err := s.c.NewSession() if err != nil { return errors.Wrap(err, "getting ssh session") } + defer sess.Close() - cmd := getDeleteFileCommand(f) - return sess.Run(cmd) + return sess.Run(fmt.Sprintf("sudo rm %s", dst)) } // teeSSH runs an SSH command, streaming stdout, stderr to logs @@ -150,14 +150,26 @@ func (s *SSHRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) { // Copy copies a file to the remote over SSH. func (s *SSHRunner) Copy(f assets.CopyableFile) error { dst := path.Join(path.Join(f.GetTargetDir(), f.GetTargetName())) - exists, err := s.sameFileExists(f, dst) - if err != nil { - glog.Infof("Checked if %s exists, but got error: %v", dst, err) + + // For small files, don't bother risking being wrong for no performance benefit + if f.GetLength() > 2048 { + exists, err := fileExists(s, f, dst) + if err != nil { + glog.Infof("existence check for %s: %v", dst, err) + } + + if exists { + glog.Infof("copy: skipping %s (exists)", dst) + return nil + } } - if exists { - glog.Infof("Skipping copying %s as it already exists", dst) - return nil + + src := f.GetSourcePath() + glog.Infof("scp %s --> %s (%d bytes)", src, dst, f.GetLength()) + if f.GetLength() == 0 { + glog.Warningf("0 byte asset: %+v", f) } + sess, err := s.c.NewSession() if err != nil { return errors.Wrap(err, "NewSession") @@ -171,14 +183,13 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { // StdinPipe is closed. But let's use errgroup to make it explicit. var g errgroup.Group var copied int64 - glog.Infof("Transferring %d bytes to %s", f.GetLength(), dst) g.Go(func() error { defer w.Close() header := fmt.Sprintf("C%s %d %s\n", f.GetPermissions(), f.GetLength(), f.GetTargetName()) fmt.Fprint(w, header) if f.GetLength() == 0 { - glog.Warningf("%s is a 0 byte asset!", f.GetTargetName()) + glog.Warningf("asked to copy a 0 byte asset: %+v", f) fmt.Fprint(w, "\x00") return nil } @@ -190,7 +201,6 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { if copied != int64(f.GetLength()) { return fmt.Errorf("%s: expected to copy %d bytes, but copied %d instead", f.GetTargetName(), f.GetLength(), copied) } - glog.Infof("%s: copied %d bytes", f.GetTargetName(), copied) fmt.Fprint(w, "\x00") return nil }) @@ -208,72 +218,3 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { } return g.Wait() } - -func (s *SSHRunner) sameFileExists(f assets.CopyableFile, dst string) (bool, error) { - // get file size and modtime of the source - srcSize := f.GetLength() - srcModTime, err := f.GetModTime() - if err != nil { - return false, err - } - if srcModTime.IsZero() { - return false, nil - } - - // get file size and modtime of the destination - sess, err := s.c.NewSession() - if err != nil { - return false, err - } - - cmd := "stat -c \"%s %y\" " + dst - out, err := sess.CombinedOutput(cmd) - if err != nil { - return false, err - } - outputs := strings.SplitN(strings.Trim(string(out), "\n"), " ", 2) - - dstSize, err := strconv.Atoi(outputs[0]) - if err != nil { - return false, err - } - dstModTime, err := time.Parse(layout, outputs[1]) - if err != nil { - return false, err - } - glog.Infof("found %s: %d bytes, modified at %s", dst, dstSize, dstModTime) - - // compare sizes and modtimes - if srcSize != dstSize { - return false, errors.New("source file and destination file are different sizes") - } - - return srcModTime.Equal(dstModTime), nil -} - -// teePrefix copies bytes from a 
reader to writer, logging each new line. -func teePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error { - scanner := bufio.NewScanner(r) - scanner.Split(bufio.ScanBytes) - var line bytes.Buffer - - for scanner.Scan() { - b := scanner.Bytes() - if _, err := w.Write(b); err != nil { - return err - } - if bytes.IndexAny(b, "\r\n") == 0 { - if line.Len() > 0 { - logger("%s%s", prefix, line.String()) - line.Reset() - } - continue - } - line.Write(b) - } - // Catch trailing output in case stream does not end with a newline - if line.Len() > 0 { - logger("%s%s", prefix, line.String()) - } - return nil -} diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 4a52268ce5..4db4af8dc5 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -52,6 +52,14 @@ const ( var ( // ErrKeyNotFound is the error returned when a key doesn't exist in the config file ErrKeyNotFound = errors.New("specified key could not be found in config") + // DockerEnv contains the environment variables + DockerEnv []string + // DockerOpt contains the option parameters + DockerOpt []string + // ExtraOptions contains extra options (if any) + ExtraOptions ExtraOptionSlice + // AddonList contains the list of addons + AddonList []string ) // ErrNotExist is the error returned when a config does not exist @@ -185,7 +193,6 @@ func (c *simpleConfigLoader) LoadConfigFromFile(profileName string, miniHome ... } func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterConfig, miniHome ...string) error { - // Move to profile package path := profileFilePath(profileName, miniHome...) contents, err := json.MarshalIndent(cc, "", " ") if err != nil { diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index c146a8ac42..bfb6298c71 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -111,6 +111,23 @@ func CreateEmptyProfile(name string, miniHome ...string) error { return SaveProfile(name, cfg, miniHome...) } +// SaveNode saves a node to a cluster +func SaveNode(cfg *ClusterConfig, node *Node) error { + update := false + for i, n := range cfg.Nodes { + if n.Name == node.Name { + cfg.Nodes[i] = *node + update = true + break + } + } + + if !update { + cfg.Nodes = append(cfg.Nodes, *node) + } + return SaveProfile(viper.GetString(ProfileName), cfg) +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 97c2a13039..95991d5f33 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -65,6 +65,7 @@ type ClusterConfig struct { KubernetesConfig KubernetesConfig Nodes []Node Addons map[string]bool + VerifyComponents map[string]bool // map of components to verify and wait for after start. } // KubernetesConfig contains the parameters used to configure the VM Kubernetes. diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 5caeeb7505..7917b0e7aa 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -17,6 +17,7 @@ limitations under the License. 
package constants import ( + "errors" "path/filepath" "k8s.io/client-go/tools/clientcmd" @@ -26,9 +27,9 @@ import ( const ( // DefaultKubernetesVersion is the default kubernetes version - DefaultKubernetesVersion = "v1.17.3" + DefaultKubernetesVersion = "v1.18.0" // NewestKubernetesVersion is the newest Kubernetes version to test against - NewestKubernetesVersion = "v1.18.0-beta.2" + NewestKubernetesVersion = "v1.18.0" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.11.10" // DefaultClusterName is the default nane for the k8s cluster @@ -92,4 +93,15 @@ var ( KubernetesReleaseBinaries = []string{"kubelet", "kubeadm", "kubectl"} // ImageCacheDir is the path to the image cache directory ImageCacheDir = localpath.MakeMiniPath("cache", "images") + + // DefaultNamespaces are kubernetes namespaces used by minikube, including addons + DefaultNamespaces = []string{ + "kube-system", + "kubernetes-dashboard", + "storage-gluster", + "istio-operator", + } + + // ErrMachineMissing is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C) + ErrMachineMissing = errors.New("machine does not exist") ) diff --git a/pkg/minikube/constants/constants_freebsd.go b/pkg/minikube/constants/constants_freebsd.go new file mode 100644 index 0000000000..77a47a765f --- /dev/null +++ b/pkg/minikube/constants/constants_freebsd.go @@ -0,0 +1,26 @@ +// +build linux, !gendocs + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package constants + +import ( + "k8s.io/client-go/util/homedir" +) + +// DefaultMountDir is the default mount dir +var DefaultMountDir = homedir.HomeDir() diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index d817dd7de0..8eadb57392 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -30,7 +30,9 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/sysinit" ) const ( @@ -115,6 +117,7 @@ type Containerd struct { Runner CommandRunner ImageRepository string KubernetesVersion semver.Version + Init sysinit.Manager } // Name is a human readable name for containerd @@ -157,9 +160,7 @@ func (r *Containerd) DefaultCNI() bool { // Active returns if containerd is active on the host func (r *Containerd) Active() bool { - c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "containerd") - _, err := r.Runner.RunCmd(c) - return err == nil + return r.Init.Active("containerd") } // Available returns an error if it is not possible to use this runtime on a host @@ -207,21 +208,14 @@ func (r *Containerd) Enable(disOthers bool) error { if err := enableIPForwarding(r.Runner); err != nil { return err } + // Otherwise, containerd will fail API requests with 'Unimplemented' - c := exec.Command("sudo", "systemctl", "restart", "containerd") - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "restart containerd") - } - return nil + return r.Init.Restart("containerd") } // Disable idempotently disables containerd on a host func (r *Containerd) Disable() error { - c := exec.Command("sudo", "systemctl", "stop", "containerd") - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrapf(err, "stop containerd") - } - return nil + return r.Init.ForceStop("containerd") } // ImageExists checks if an image exists, expected input format @@ -313,5 +307,8 @@ func (r *Containerd) SystemLogCmd(len int) string { // Preload preloads the container runtime with k8s images func (r *Containerd) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } return fmt.Errorf("not yet implemented for %s", r.Name()) } diff --git a/pkg/minikube/cruntime/cri.go b/pkg/minikube/cruntime/cri.go index 2cf50c78e2..6f05551235 100644 --- a/pkg/minikube/cruntime/cri.go +++ b/pkg/minikube/cruntime/cri.go @@ -195,7 +195,7 @@ func stopCRIContainers(cr CommandRunner, ids []string) error { glog.Infof("Stopping containers: %s", ids) crictl := getCrictlPath(cr) - args := append([]string{crictl, "rm"}, ids...) + args := append([]string{crictl, "stop"}, ids...) c := exec.Command("sudo", args...) 
if _, err := cr.RunCmd(c); err != nil { return errors.Wrap(err, "crictl") diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index 804e4989ba..250d765df4 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -26,7 +26,9 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper/images" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/sysinit" ) const ( @@ -40,6 +42,7 @@ type CRIO struct { Runner CommandRunner ImageRepository string KubernetesVersion semver.Version + Init sysinit.Manager } // generateCRIOConfig sets up /etc/crio/crio.conf @@ -103,9 +106,7 @@ func (r *CRIO) Available() error { // Active returns if CRIO is active on the host func (r *CRIO) Active() bool { - c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "crio") - _, err := r.Runner.RunCmd(c) - return err == nil + return r.Init.Active("crio") } // Enable idempotently enables CRIO on a host @@ -124,19 +125,12 @@ func (r *CRIO) Enable(disOthers bool) error { if err := enableIPForwarding(r.Runner); err != nil { return err } - - if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "restart", "crio")); err != nil { - return errors.Wrapf(err, "enable crio.") - } - return nil + return r.Init.Start("crio") } // Disable idempotently disables CRIO on a host func (r *CRIO) Disable() error { - if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "stop", "crio")); err != nil { - return errors.Wrapf(err, "disable crio.") - } - return nil + return r.Init.ForceStop("crio") } // ImageExists checks if an image exists @@ -230,5 +224,8 @@ func (r *CRIO) SystemLogCmd(len int) string { // Preload preloads the container runtime with k8s images func (r *CRIO) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } return fmt.Errorf("not yet implemented for %s", r.Name()) } diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index d7153d0830..bbb410be13 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -28,6 +28,7 @@ import ( "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/sysinit" ) // ContainerState is the run state of a container @@ -131,13 +132,27 @@ type ListOptions struct { // New returns an appropriately configured runtime func New(c Config) (Manager, error) { + sm := sysinit.New(c.Runner) + switch c.Type { case "", "docker": - return &Docker{Socket: c.Socket, Runner: c.Runner}, nil + return &Docker{Socket: c.Socket, Runner: c.Runner, Init: sm}, nil case "crio", "cri-o": - return &CRIO{Socket: c.Socket, Runner: c.Runner, ImageRepository: c.ImageRepository, KubernetesVersion: c.KubernetesVersion}, nil + return &CRIO{ + Socket: c.Socket, + Runner: c.Runner, + ImageRepository: c.ImageRepository, + KubernetesVersion: c.KubernetesVersion, + Init: sm, + }, nil case "containerd": - return &Containerd{Socket: c.Socket, Runner: c.Runner, ImageRepository: c.ImageRepository, KubernetesVersion: c.KubernetesVersion}, nil + return &Containerd{ + Socket: c.Socket, + Runner: c.Runner, + ImageRepository: c.ImageRepository, + KubernetesVersion: c.KubernetesVersion, + Init: sm, + }, nil default: return nil, fmt.Errorf("unknown runtime type: %q", c.Type) } @@ -151,6 +166,7 @@ func ContainerStatusCommand() 
string { // disableOthers disables all other runtimes except for me. func disableOthers(me Manager, cr CommandRunner) error { + // valid values returned by manager.Name() runtimes := []string{"containerd", "crio", "docker"} for _, name := range runtimes { @@ -163,13 +179,22 @@ func disableOthers(me Manager, cr CommandRunner) error { if r.Name() == me.Name() { continue } + + // Don't disable containerd if we are bound to it + if me.Name() == "Docker" && r.Name() == "containerd" && dockerBoundToContainerd(cr) { + glog.Infof("skipping containerd shutdown because we are bound to it") + continue + } + // runtime is already disabled, nothing to do. if !r.Active() { continue } + if err = r.Disable(); err != nil { glog.Warningf("disable failed: %v", err) } + // Validate that the runtime really is offline - and that Active & Disable are properly written. if r.Active() { return fmt.Errorf("%s is still active", r.Name()) diff --git a/pkg/minikube/cruntime/cruntime_test.go b/pkg/minikube/cruntime/cruntime_test.go index af70e01c80..aef420cf84 100644 --- a/pkg/minikube/cruntime/cruntime_test.go +++ b/pkg/minikube/cruntime/cruntime_test.go @@ -23,6 +23,7 @@ import ( "strings" "testing" + "github.com/golang/glog" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" @@ -406,8 +407,27 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { // systemctl is a fake implementation of systemctl func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // nolint result 0 (string) is always "" + glog.Infof("fake systemctl: %v", args) action := args[0] - svcs := args[1:] + + if action == "--version" { + return "systemd 123 (321.2-1)", nil + } + + if action == "daemon-reload" { + return "ok", nil + } + + var svcs []string + if len(args) > 0 { + svcs = args[1:] + } + + // force + if svcs[0] == "-f" { + svcs = svcs[1:] + } + out := "" for i, arg := range args { @@ -492,7 +512,6 @@ func TestVersion(t *testing.T) { // defaultServices reflects the default boot state for the minikube VM var defaultServices = map[string]serviceState{ "docker": SvcRunning, - "docker.socket": SvcRunning, "crio": SvcExited, "crio-shutdown": SvcExited, "containerd": SvcExited, @@ -503,9 +522,9 @@ func TestDisable(t *testing.T) { runtime string want []string }{ - {"docker", []string{"sudo", "systemctl", "stop", "docker", "docker.socket"}}, - {"crio", []string{"sudo", "systemctl", "stop", "crio"}}, - {"containerd", []string{"sudo", "systemctl", "stop", "containerd"}}, + {"docker", []string{"sudo", "systemctl", "stop", "-f", "docker"}}, + {"crio", []string{"sudo", "systemctl", "stop", "-f", "crio"}}, + {"containerd", []string{"sudo", "systemctl", "stop", "-f", "containerd"}}, } for _, tc := range tests { t.Run(tc.runtime, func(t *testing.T) { @@ -535,23 +554,20 @@ func TestEnable(t *testing.T) { }{ {"docker", map[string]serviceState{ "docker": SvcRunning, - "docker.socket": SvcRunning, "containerd": SvcExited, "crio": SvcExited, "crio-shutdown": SvcExited, }}, {"containerd", map[string]serviceState{ "docker": SvcExited, - "docker.socket": SvcExited, "containerd": SvcRestarted, "crio": SvcExited, "crio-shutdown": SvcExited, }}, {"crio", map[string]serviceState{ "docker": SvcExited, - "docker.socket": SvcExited, "containerd": SvcExited, - "crio": SvcRestarted, + "crio": SvcRunning, "crio-shutdown": SvcExited, }}, } diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 8b203badec..95f8554152 100644 --- a/pkg/minikube/cruntime/docker.go +++ 
b/pkg/minikube/cruntime/docker.go @@ -32,6 +32,7 @@ import ( "k8s.io/minikube/pkg/minikube/docker" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/sysinit" ) // KubernetesContainerPrefix is the prefix of each kubernetes container @@ -56,6 +57,7 @@ func (e *ErrISOFeature) Error() string { type Docker struct { Socket string Runner CommandRunner + Init sysinit.Manager } // Name is a human readable name for Docker @@ -97,9 +99,7 @@ func (r *Docker) Available() error { // Active returns if docker is active on the host func (r *Docker) Active() bool { - c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "docker") - _, err := r.Runner.RunCmd(c) - return err == nil + return r.Init.Active("docker") } // Enable idempotently enables Docker on a host @@ -109,29 +109,18 @@ func (r *Docker) Enable(disOthers bool) error { glog.Warningf("disableOthers: %v", err) } } - c := exec.Command("sudo", "systemctl", "start", "docker") - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "enable docker.") - } - return nil + + return r.Init.Start("docker") } // Restart restarts Docker on a host func (r *Docker) Restart() error { - c := exec.Command("sudo", "systemctl", "restart", "docker") - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "restarting docker.") - } - return nil + return r.Init.Restart("docker") } // Disable idempotently disables Docker on a host func (r *Docker) Disable() error { - c := exec.Command("sudo", "systemctl", "stop", "docker", "docker.socket") - if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "disable docker") - } - return nil + return r.Init.ForceStop("docker") } // ImageExists checks if an image exists @@ -290,7 +279,11 @@ func (r *Docker) SystemLogCmd(len int) string { // 2. Extract the preloaded tarball to the correct directory // 3. 
Remove the tarball within the VM func (r *Docker) Preload(cfg config.KubernetesConfig) error { + if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) { + return nil + } k8sVersion := cfg.KubernetesVersion + cRuntime := cfg.ContainerRuntime // If images already exist, return images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion) @@ -307,7 +300,7 @@ func (r *Docker) Preload(cfg config.KubernetesConfig) error { glog.Infof("error saving reference store: %v", err) } - tarballPath := download.TarballPath(k8sVersion) + tarballPath := download.TarballPath(k8sVersion, cRuntime) targetDir := "/" targetName := "preloaded.tar.lz4" dest := path.Join(targetDir, targetName) @@ -371,3 +364,18 @@ func DockerImagesPreloaded(runner command.Runner, images []string) bool { } return true } + +func dockerBoundToContainerd(runner command.Runner) bool { + // NOTE: assumes systemd + rr, err := runner.RunCmd(exec.Command("sudo", "systemctl", "cat", "docker.service")) + if err != nil { + glog.Warningf("unable to check if docker is bound to containerd") + return false + } + + if strings.Contains(rr.Stdout.String(), "\nBindsTo=containerd") { + return true + } + + return false +} diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 62893edf1b..59a0c4b7e0 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -23,7 +23,8 @@ import ( "io/ioutil" "net/http" "os" - "path" + "path/filepath" + "runtime" "cloud.google.com/go/storage" "google.golang.org/api/option" @@ -31,25 +32,28 @@ import ( "github.com/golang/glog" "github.com/hashicorp/go-getter" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/out" ) const ( // PreloadVersion is the current version of the preloaded tarball - PreloadVersion = "v1" + // + // NOTE: You may need to bump this version up when upgrading auxiliary docker images + PreloadVersion = "v2" // PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist PreloadBucket = "minikube-preloaded-volume-tarballs" ) -// returns name of the tarball -func tarballName(k8sVersion string) string { - return fmt.Sprintf("preloaded-images-k8s-%s-%s-docker-overlay2.tar.lz4", PreloadVersion, k8sVersion) +// TarballName returns name of the tarball +func TarballName(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4", PreloadVersion, k8sVersion, containerRuntime, runtime.GOARCH) } // returns the name of the checksum file -func checksumName(k8sVersion string) string { - return fmt.Sprintf("%s.checksum", tarballName(k8sVersion)) +func checksumName(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("%s.checksum", TarballName(k8sVersion, containerRuntime)) } // returns target dir for all cached items related to preloading @@ -57,35 +61,44 @@ func targetDir() string { return localpath.MakeMiniPath("cache", "preloaded-tarball") } -// PreloadChecksumPath returns path to checksum file -func PreloadChecksumPath(k8sVersion string) string { - return path.Join(targetDir(), checksumName(k8sVersion)) +// PreloadChecksumPath returns the local path to the cached checksum file +func PreloadChecksumPath(k8sVersion, containerRuntime string) string { + return filepath.Join(targetDir(), checksumName(k8sVersion, containerRuntime)) } -// TarballPath returns the path to the preloaded tarball -func TarballPath(k8sVersion string) string { - return path.Join(targetDir(), 
tarballName(k8sVersion)) +// TarballPath returns the local path to the cached preload tarball +func TarballPath(k8sVersion, containerRuntime string) string { + return filepath.Join(targetDir(), TarballName(k8sVersion, containerRuntime)) } // remoteTarballURL returns the URL for the remote tarball in GCS -func remoteTarballURL(k8sVersion string) string { - return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, tarballName(k8sVersion)) +func remoteTarballURL(k8sVersion, containerRuntime string) string { + return fmt.Sprintf("https://storage.googleapis.com/%s/%s", PreloadBucket, TarballName(k8sVersion, containerRuntime)) } // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string) bool { + glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime) + if !viper.GetBool("preload") { + return false + } + + // See https://github.com/kubernetes/minikube/issues/6933 + // and https://github.com/kubernetes/minikube/issues/6934 + // to track status of adding containerd & crio if containerRuntime != "docker" { + glog.Info("Container runtime isn't docker, skipping preload") return false } // Omit remote check if tarball exists locally - targetPath := TarballPath(k8sVersion) + targetPath := TarballPath(k8sVersion, containerRuntime) if _, err := os.Stat(targetPath); err == nil { glog.Infof("Found local preload: %s", targetPath) return true } - url := remoteTarballURL(k8sVersion) + url := remoteTarballURL(k8sVersion, containerRuntime) resp, err := http.Head(url) if err != nil { glog.Warningf("%s fetch error: %v", url, err) @@ -104,10 +117,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { // Preload caches the preloaded images tarball on the host machine func Preload(k8sVersion, containerRuntime string) error { - if containerRuntime != "docker" { - return nil - } - targetPath := TarballPath(k8sVersion) + targetPath := TarballPath(k8sVersion, containerRuntime) if _, err := os.Stat(targetPath); err == nil { glog.Infof("Found %s in cache, skipping download", targetPath) @@ -120,8 +130,8 @@ func Preload(k8sVersion, containerRuntime string) error { return nil } - out.T(out.FileDownload, "Downloading preloaded images tarball for k8s {{.version}} ...", out.V{"version": k8sVersion}) - url := remoteTarballURL(k8sVersion) + out.T(out.FileDownload, "Downloading Kubernetes {{.version}} preload ...", out.V{"version": k8sVersion}) + url := remoteTarballURL(k8sVersion, containerRuntime) tmpDst := targetPath + ".download" client := &getter.Client{ @@ -136,34 +146,34 @@ func Preload(k8sVersion, containerRuntime string) error { return errors.Wrapf(err, "download failed: %s", url) } - if err := saveChecksumFile(k8sVersion); err != nil { + if err := saveChecksumFile(k8sVersion, containerRuntime); err != nil { return errors.Wrap(err, "saving checksum file") } - if err := verifyChecksum(k8sVersion, tmpDst); err != nil { + if err := verifyChecksum(k8sVersion, containerRuntime, tmpDst); err != nil { return errors.Wrap(err, "verify") } return os.Rename(tmpDst, targetPath) } -func saveChecksumFile(k8sVersion string) error { - glog.Infof("saving checksum for %s ...", tarballName(k8sVersion)) +func saveChecksumFile(k8sVersion, containerRuntime string) error { + glog.Infof("saving checksum for %s ...", TarballName(k8sVersion, containerRuntime)) ctx := context.Background() client, err := storage.NewClient(ctx, option.WithoutAuthentication()) if err != nil { return errors.Wrap(err, 
"getting storage client") } - attrs, err := client.Bucket(PreloadBucket).Object(tarballName(k8sVersion)).Attrs(ctx) + attrs, err := client.Bucket(PreloadBucket).Object(TarballName(k8sVersion, containerRuntime)).Attrs(ctx) if err != nil { return errors.Wrap(err, "getting storage object") } checksum := attrs.MD5 - return ioutil.WriteFile(PreloadChecksumPath(k8sVersion), checksum, 0644) + return ioutil.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0644) } // verifyChecksum returns true if the checksum of the local binary matches // the checksum of the remote binary -func verifyChecksum(k8sVersion string, path string) error { +func verifyChecksum(k8sVersion, containerRuntime, path string) error { glog.Infof("verifying checksumm of %s ...", path) // get md5 checksum of tarball path contents, err := ioutil.ReadFile(path) @@ -172,7 +182,7 @@ func verifyChecksum(k8sVersion string, path string) error { } checksum := md5.Sum(contents) - remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion)) + remoteChecksum, err := ioutil.ReadFile(PreloadChecksumPath(k8sVersion, containerRuntime)) if err != nil { return errors.Wrap(err, "reading checksum file") } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..77b44e15ef 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -19,6 +19,7 @@ package driver import ( "fmt" "os" + "runtime" "sort" "strings" @@ -112,7 +113,7 @@ func IsMock(name string) bool { // IsVM checks if the driver is a VM func IsVM(name string) bool { - if IsKIC(name) || IsMock(name) || BareMetal(name) { + if IsKIC(name) || BareMetal(name) { return false } return true @@ -128,6 +129,12 @@ func NeedsRoot(name string) bool { return name == None || name == Podman } +// NeedsPortForward returns true if driver is unable provide direct IP connectivity +func NeedsPortForward(name string) bool { + // Docker for Desktop + return IsKIC(name) && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") +} + // HasResourceLimits returns true if driver can set resource limits such as memory size or CPU count. 
func HasResourceLimits(name string) bool { return !(name == None || name == Podman) @@ -164,8 +171,8 @@ func FlagDefaults(name string) FlagHints { } // Choices returns a list of drivers which are possible on this system -func Choices() []registry.DriverState { - options := registry.Available() +func Choices(vm bool) []registry.DriverState { + options := registry.Available(vm) // Descending priority for predictability and appearance sort.Slice(options, func(i, j int) bool { @@ -174,8 +181,8 @@ func Choices() []registry.DriverState { return options } -// Suggest returns a suggested driver from a set of options -func Suggest(options []registry.DriverState) (registry.DriverState, []registry.DriverState) { +// Suggest returns a suggested driver, alternate drivers, and rejected drivers +func Suggest(options []registry.DriverState) (registry.DriverState, []registry.DriverState, []registry.DriverState) { pick := registry.DriverState{} for _, ds := range options { if !ds.State.Installed { @@ -198,17 +205,29 @@ func Suggest(options []registry.DriverState) (registry.DriverState, []registry.D } alternates := []registry.DriverState{} + rejects := []registry.DriverState{} for _, ds := range options { if ds != pick { - if !ds.State.Healthy || !ds.State.Installed { + if !ds.State.Installed { + ds.Rejection = fmt.Sprintf("Not installed: %v", ds.State.Error) + rejects = append(rejects, ds) continue } + + if !ds.State.Healthy { + ds.Rejection = fmt.Sprintf("Not healthy: %v", ds.State.Error) + rejects = append(rejects, ds) + continue + } + + ds.Rejection = fmt.Sprintf("%s is preferred", pick.Name) alternates = append(alternates, ds) } } glog.Infof("Picked: %+v", pick) glog.Infof("Alternatives: %+v", alternates) - return pick, alternates + glog.Infof("Rejects: %+v", rejects) + return pick, alternates, rejects } // Status returns the status of a driver diff --git a/pkg/minikube/config/node.go b/pkg/minikube/driver/driver_freebsd.go similarity index 58% rename from pkg/minikube/config/node.go rename to pkg/minikube/driver/driver_freebsd.go index 1c6f050159..21b8761649 100644 --- a/pkg/minikube/config/node.go +++ b/pkg/minikube/driver/driver_freebsd.go @@ -14,23 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package driver -// AddNode adds a new node config to an existing cluster. -func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error { - node := Node{ - Name: name, - Worker: true, - } +import "os/exec" - if controlPlane { - node.ControlPlane = true - } - - if k8sVersion != "" { - node.KubernetesVersion = k8sVersion - } - - cc.Nodes = append(cc.Nodes, node) - return SaveProfile(profileName, cc) +// supportedDrivers is a list of supported drivers on Darwin. 
+var supportedDrivers = []string{ + VirtualBox, +} + +func VBoxManagePath() string { + cmd := "VBoxManage" + if path, err := exec.LookPath(cmd); err == nil { + return path + } + return cmd } diff --git a/pkg/minikube/driver/driver_test.go b/pkg/minikube/driver/driver_test.go index 8f9be829ad..b6afd6a62c 100644 --- a/pkg/minikube/driver/driver_test.go +++ b/pkg/minikube/driver/driver_test.go @@ -112,6 +112,7 @@ func TestSuggest(t *testing.T) { choices []string pick string alts []string + rejects []string }{ { def: registry.DriverDef{ @@ -122,6 +123,7 @@ func TestSuggest(t *testing.T) { choices: []string{"unhealthy"}, pick: "", alts: []string{}, + rejects: []string{"unhealthy"}, }, { def: registry.DriverDef{ @@ -132,6 +134,7 @@ func TestSuggest(t *testing.T) { choices: []string{"discouraged", "unhealthy"}, pick: "", alts: []string{"discouraged"}, + rejects: []string{"unhealthy"}, }, { def: registry.DriverDef{ @@ -142,6 +145,7 @@ func TestSuggest(t *testing.T) { choices: []string{"default", "discouraged", "unhealthy"}, pick: "default", alts: []string{"discouraged"}, + rejects: []string{"unhealthy"}, }, { def: registry.DriverDef{ @@ -152,6 +156,7 @@ func TestSuggest(t *testing.T) { choices: []string{"preferred", "default", "discouraged", "unhealthy"}, pick: "preferred", alts: []string{"default", "discouraged"}, + rejects: []string{"unhealthy"}, }, } for _, tc := range tests { @@ -162,7 +167,7 @@ func TestSuggest(t *testing.T) { } } - got := Choices() + got := Choices(false) gotNames := []string{} for _, c := range got { gotNames = append(gotNames, c.Name) @@ -172,7 +177,7 @@ func TestSuggest(t *testing.T) { t.Errorf("choices mismatch (-want +got):\n%s", diff) } - pick, alts := Suggest(got) + pick, alts, rejects := Suggest(got) if pick.Name != tc.pick { t.Errorf("pick = %q, expected %q", pick.Name, tc.pick) } @@ -184,6 +189,15 @@ func TestSuggest(t *testing.T) { if diff := cmp.Diff(gotAlts, tc.alts); diff != "" { t.Errorf("alts mismatch (-want +got):\n%s", diff) } + + gotRejects := []string{} + for _, r := range rejects { + gotRejects = append(gotRejects, r.Name) + } + if diff := cmp.Diff(gotRejects, tc.rejects); diff != "" { + t.Errorf("rejects mismatch (-want +got):\n%s", diff) + } + }) } } diff --git a/pkg/minikube/driver/endpoint.go b/pkg/minikube/driver/endpoint.go new file mode 100644 index 0000000000..dc9507a4d9 --- /dev/null +++ b/pkg/minikube/driver/endpoint.go @@ -0,0 +1,47 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver + +import ( + "net" + + "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" +) + +// ControlPaneEndpoint returns the location where callers can reach this cluster +func ControlPaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName string) (string, net.IP, int, error) { + if NeedsPortForward(driverName) { + port, err := oci.ForwardedPort(cc.Driver, cc.Name, cp.Port) + hostname := oci.DefaultBindIPV4 + ip := net.ParseIP(hostname) + + // https://github.com/kubernetes/minikube/issues/3878 + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return hostname, ip, port, err + } + + // https://github.com/kubernetes/minikube/issues/3878 + hostname := cp.IP + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + hostname = cc.KubernetesConfig.APIServerName + } + return hostname, net.ParseIP(cp.IP), cp.Port, nil +} diff --git a/pkg/minikube/exit/exit.go b/pkg/minikube/exit/exit.go index 4ed989d2a6..4f9f734b2c 100644 --- a/pkg/minikube/exit/exit.go +++ b/pkg/minikube/exit/exit.go @@ -18,14 +18,13 @@ limitations under the License. package exit import ( - "fmt" "os" "runtime" + "runtime/debug" "github.com/golang/glog" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/problem" - "k8s.io/minikube/pkg/minikube/translate" ) // Exit codes based on sysexits(3) @@ -40,9 +39,6 @@ const ( IO = 74 // IO represents an I/O error Config = 78 // Config represents an unconfigured or misconfigured state Permissions = 77 // Permissions represents a permissions error - - // MaxLogEntries controls the number of log entries to show for each source - MaxLogEntries = 3 ) // UsageT outputs a templated usage error and exits with error code 64 @@ -59,18 +55,19 @@ func WithCodeT(code int, format string, a ...out.V) { // WithError outputs an error and exits. func WithError(msg string, err error) { + glog.Infof("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack()) p := problem.FromError(err, runtime.GOOS) if p != nil { - WithProblem(msg, p) + WithProblem(msg, err, p) } - displayError(msg, err) + out.DisplayError(msg, err) os.Exit(Software) } // WithProblem outputs info related to a known problem and exits. -func WithProblem(msg string, p *problem.Problem) { +func WithProblem(msg string, err error, p *problem.Problem) { out.ErrT(out.Empty, "") - out.FatalT(msg) + out.FailureT("[{{.id}}] {{.msg}} {{.error}}", out.V{"msg": msg, "id": p.ID, "error": p.Err}) p.Display() if p.ShowIssueLink { out.ErrT(out.Empty, "") @@ -79,29 +76,3 @@ func WithProblem(msg string, p *problem.Problem) { } os.Exit(Config) } - -// WithLogEntries outputs an error along with any important log entries, and exits. -func WithLogEntries(msg string, err error, entries map[string][]string) { - displayError(msg, err) - - for name, lines := range entries { - out.T(out.FailureType, "Problems detected in {{.entry}}:", out.V{"entry": name}) - if len(lines) > MaxLogEntries { - lines = lines[:MaxLogEntries] - } - for _, l := range lines { - out.T(out.LogEntry, l) - } - } - os.Exit(Software) -} - -func displayError(msg string, err error) { - // use Warning because Error will display a duplicate message to stderr - glog.Warningf(fmt.Sprintf("%s: %v", msg, err)) - out.ErrT(out.Empty, "") - out.FatalT("{{.msg}}: {{.err}}", out.V{"msg": translate.T(msg), "err": err}) - out.ErrT(out.Empty, "") - out.ErrT(out.Sad, "minikube is exiting due to an error. 
If the above message is not useful, open an issue:") - out.ErrT(out.URL, "https://github.com/kubernetes/minikube/issues/new/choose") -} diff --git a/pkg/minikube/extract/extract.go b/pkg/minikube/extract/extract.go index 8453d53ea5..584acd827e 100644 --- a/pkg/minikube/extract/extract.go +++ b/pkg/minikube/extract/extract.go @@ -29,6 +29,10 @@ import ( "strconv" "strings" + // initflag must be imported before any other minikube pkg. + // Fix for https://github.com/kubernetes/minikube/issues/4866 + _ "k8s.io/minikube/pkg/initflag" + "github.com/golang-collections/collections/stack" "github.com/pkg/errors" "k8s.io/minikube/pkg/util/lock" @@ -45,6 +49,7 @@ var blacklist = []string{ "env {{.docker_env}}", "\\n", "==\u003e {{.name}} \u003c==", + "- {{.profile}}", } // ErrMapFile is a constant to refer to the err_map file, which contains the Advice strings. @@ -450,14 +455,17 @@ func writeStringsToFiles(e *state, output string) error { return nil } fmt.Printf("Writing to %s\n", filepath.Base(path)) - var currentTranslations map[string]interface{} + currentTranslations := make(map[string]interface{}) f, err := ioutil.ReadFile(path) if err != nil { return errors.Wrap(err, "reading translation file") } - err = json.Unmarshal(f, &currentTranslations) - if err != nil { - return errors.Wrap(err, "unmarshalling current translations") + // Unmarshal nonempty files + if len(f) > 0 { + err = json.Unmarshal(f, &currentTranslations) + if err != nil { + return errors.Wrap(err, "unmarshalling current translations") + } } // Make sure to not overwrite already translated strings diff --git a/pkg/minikube/kubeconfig/context_test.go b/pkg/minikube/kubeconfig/context_test.go index 29a3b604ca..7725294441 100644 --- a/pkg/minikube/kubeconfig/context_test.go +++ b/pkg/minikube/kubeconfig/context_test.go @@ -24,7 +24,8 @@ import ( ) func TestDeleteContext(t *testing.T) { - fn := tempFile(t, fakeKubeCfg) + // See kubeconfig_test + fn := tempFile(t, kubeConfigWithoutHTTPS) if err := DeleteContext("la-croix", fn); err != nil { t.Fatal(err) } diff --git a/pkg/minikube/kubeconfig/kubeconfig.go b/pkg/minikube/kubeconfig/kubeconfig.go index 6742ac4979..b6ecf1d4a0 100644 --- a/pkg/minikube/kubeconfig/kubeconfig.go +++ b/pkg/minikube/kubeconfig/kubeconfig.go @@ -19,7 +19,6 @@ package kubeconfig import ( "fmt" "io/ioutil" - "net" "net/url" "os" "path/filepath" @@ -35,51 +34,27 @@ import ( "k8s.io/minikube/pkg/util/lock" ) -// IsClusterInConfig verifies the ip stored in kubeconfig. -func IsClusterInConfig(ip net.IP, clusterName string, configPath ...string) (bool, error) { +// VerifyEndpoint verifies the IP:port stored in kubeconfig.
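// It returns nil only when the stored server's host and port match the values
// given. Editor's sketch of a hypothetical caller (the context name and
// address below are illustrative, not taken from this patch):
//
//	if err := kubeconfig.VerifyEndpoint("minikube", "192.168.39.10", 8443); err != nil {
//		glog.Warningf("kubeconfig endpoint appears stale: %v", err)
//	}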
+func VerifyEndpoint(contextName string, hostname string, port int, configPath ...string) error { path := PathFromEnv() if configPath != nil { path = configPath[0] } - if ip == nil { - return false, fmt.Errorf("error, empty ip passed") - } - kip, err := extractIP(clusterName, path) - if err != nil { - return false, err - } - if kip.Equal(ip) { - return true, nil - } - // Kubeconfig IP misconfigured - return false, nil -} + if hostname == "" { + return fmt.Errorf("empty IP") + } -// Port returns the Port number stored for minikube in the kubeconfig specified -func Port(clusterName string, configPath ...string) (int, error) { - path := PathFromEnv() - if configPath != nil { - path = configPath[0] - } - cfg, err := readOrNew(path) + gotHostname, gotPort, err := Endpoint(contextName, path) if err != nil { - return 0, errors.Wrap(err, "Error getting kubeconfig status") + return errors.Wrap(err, "extract IP") } - cluster, ok := cfg.Clusters[clusterName] - if !ok { - return 0, errors.Errorf("Kubeconfig does not have a record of the machine cluster") + + if hostname != gotHostname || port != gotPort { + return fmt.Errorf("got: %s:%d, want: %s:%d", gotHostname, gotPort, hostname, port) } - kurl, err := url.Parse(cluster.Server) - if err != nil { - return constants.APIServerPort, nil - } - _, kport, err := net.SplitHostPort(kurl.Host) - if err != nil { - return constants.APIServerPort, nil - } - port, err := strconv.Atoi(kport) - return port, err + + return nil } // PathFromEnv gets the path to the first kubeconfig @@ -98,65 +73,58 @@ func PathFromEnv() string { return constants.KubeconfigPath } -// extractIP returns the IP address stored for minikube in the kubeconfig specified -func extractIP(machineName string, configPath ...string) (net.IP, error) { +// Endpoint returns the IP:port address stored for minikube in the kubeconfig specified +func Endpoint(contextName string, configPath ...string) (string, int, error) { path := PathFromEnv() if configPath != nil { path = configPath[0] } apiCfg, err := readOrNew(path) if err != nil { - return nil, errors.Wrap(err, "Error getting kubeconfig status") + return "", 0, errors.Wrap(err, "read") } - cluster, ok := apiCfg.Clusters[machineName] + cluster, ok := apiCfg.Clusters[contextName] if !ok { - return nil, errors.Errorf("Kubeconfig does not have a record of the machine cluster") + return "", 0, errors.Errorf("%q does not appear in %s", contextName, path) } - kurl, err := url.Parse(cluster.Server) + + glog.Infof("found %q server: %q", contextName, cluster.Server) + u, err := url.Parse(cluster.Server) if err != nil { - return net.ParseIP(cluster.Server), nil + return "", 0, errors.Wrap(err, "url parse") } - kip, _, err := net.SplitHostPort(kurl.Host) + + port, err := strconv.Atoi(u.Port()) if err != nil { - return net.ParseIP(kurl.Host), nil + return "", 0, errors.Wrap(err, "atoi") } - ip := net.ParseIP(kip) - return ip, nil + + return u.Hostname(), port, nil } -// UpdateIP overwrites the IP stored in kubeconfig with the provided IP. -func UpdateIP(ip net.IP, machineName string, configPath ...string) (bool, error) { - path := PathFromEnv() - if configPath != nil { - path = configPath[0] +// UpdateEndpoint overwrites the IP stored in kubeconfig with the provided IP. 
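// The boolean result reports whether the file actually had to be rewritten.
// Editor's sketch of a hypothetical caller (the port value is illustrative):
//
//	updated, err := kubeconfig.UpdateEndpoint("minikube", "127.0.0.1", 32771, kubeconfig.PathFromEnv())
//	if err != nil {
//		glog.Warningf("unable to update kubeconfig: %v", err)
//	} else if updated {
//		glog.Infof("kubeconfig endpoint was stale and has been corrected")
//	}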
+func UpdateEndpoint(contextName string, hostname string, port int, path string) (bool, error) { + if hostname == "" { + return false, fmt.Errorf("empty ip") } - if ip == nil { - return false, fmt.Errorf("error, empty ip passed") - } - - kip, err := extractIP(machineName, path) - if err != nil { - return false, err - } - if kip.Equal(ip) { + err := VerifyEndpoint(contextName, hostname, port, path) + if err == nil { return false, nil } - kport, err := Port(machineName, path) - if err != nil { - return false, err - } + glog.Infof("verify returned: %v", err) + cfg, err := readOrNew(path) if err != nil { - return false, errors.Wrap(err, "Error getting kubeconfig status") + return false, errors.Wrap(err, "read") } - // Safe to lookup server because if field non-existent getIPFromKubeconfig would have given an error - cfg.Clusters[machineName].Server = "https://" + ip.String() + ":" + strconv.Itoa(kport) + + cfg.Clusters[contextName].Server = "https://" + hostname + ":" + strconv.Itoa(port) err = writeToFile(cfg, path) if err != nil { - return false, err + return false, errors.Wrap(err, "write") } - // Kubeconfig IP reconfigured + return true, nil } diff --git a/pkg/minikube/kubeconfig/kubeconfig_test.go b/pkg/minikube/kubeconfig/kubeconfig_test.go index 7c58b574d2..c41f167f03 100644 --- a/pkg/minikube/kubeconfig/kubeconfig_test.go +++ b/pkg/minikube/kubeconfig/kubeconfig_test.go @@ -18,7 +18,6 @@ package kubeconfig import ( "io/ioutil" - "net" "os" "path/filepath" "strconv" @@ -30,7 +29,7 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -var fakeKubeCfg = []byte(` +var kubeConfigWithoutHTTPS = []byte(` apiVersion: v1 clusters: - cluster: @@ -52,7 +51,7 @@ users: client-key: /home/la-croix/apiserver.key `) -var fakeKubeCfg2 = []byte(` +var kubeConfig192 = []byte(` apiVersion: v1 clusters: - cluster: @@ -74,12 +73,34 @@ users: client-key: /home/la-croix/apiserver.key `) -var fakeKubeCfg3 = []byte(` +var kubeConfigLocalhost = []byte(` apiVersion: v1 clusters: - cluster: certificate-authority: /home/la-croix/apiserver.crt - server: https://192.168.1.1:8443 + server: https://127.0.0.1:8443 + name: minikube +contexts: +- context: + cluster: la-croix + user: la-croix + name: la-croix +current-context: la-croix +kind: Config +preferences: {} +users: +- name: la-croix + user: + client-certificate: /home/la-croix/apiserver.crt + client-key: /home/la-croix/apiserver.key +`) + +var kubeConfigLocalhost12345 = []byte(` +apiVersion: v1 +clusters: +- cluster: + certificate-authority: /home/la-croix/apiserver.crt + server: https://127.0.0.1:12345 name: minikube contexts: - context: @@ -120,7 +141,7 @@ func TestUpdate(t *testing.T) { { description: "add to kube config", cfg: setupCfg, - existingCfg: fakeKubeCfg, + existingCfg: kubeConfigWithoutHTTPS, }, { description: "use config env var", @@ -136,7 +157,7 @@ func TestUpdate(t *testing.T) { CertificateAuthority: "/home/apiserver.crt", KeepContext: true, }, - existingCfg: fakeKubeCfg, + existingCfg: kubeConfigWithoutHTTPS, }, } @@ -176,54 +197,72 @@ func TestUpdate(t *testing.T) { } } -func TestIsClusterInConfig(t *testing.T) { +func TestVerifyEndpoint(t *testing.T) { var tests = []struct { description string - ip net.IP + hostname string + port int existing []byte err bool status bool }{ { - description: "empty ip", - ip: nil, - existing: fakeKubeCfg, + description: "empty hostname", + hostname: "", + port: 8443, + existing: kubeConfigWithoutHTTPS, err: true, }, { description: "no minikube cluster", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg, + 
hostname: "192.168.10.100", + port: 8443, + existing: kubeConfigWithoutHTTPS, err: true, }, { - description: "exactly matching ip", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg2, + description: "exactly matching hostname/port", + hostname: "192.168.10.100", + port: 8443, + existing: kubeConfig192, status: true, }, { - description: "different ips", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg3, + description: "different hostnames", + hostname: "192.168.10.100", + port: 8443, + existing: kubeConfigLocalhost, + err: true, + }, + { + description: "different hostname", + hostname: "", + port: 8443, + existing: kubeConfigLocalhost, + err: true, + }, + { + description: "different ports", + hostname: "127.0.0.1", + port: 84430, + existing: kubeConfigLocalhost, + err: true, }, } for _, test := range tests { + test := test t.Run(test.description, func(t *testing.T) { t.Parallel() configFilename := tempFile(t, test.existing) - statusActual, err := IsClusterInConfig(test.ip, "minikube", configFilename) + err := VerifyEndpoint("minikube", test.hostname, test.port, configFilename) if err != nil && !test.err { t.Errorf("Got unexpected error: %v", err) } if err == nil && test.err { t.Errorf("Expected error but got none: %v", err) } - if test.status != statusActual { - t.Errorf("Expected status %t, but got %t", test.status, statusActual) - } }) } @@ -233,45 +272,58 @@ func TestUpdateIP(t *testing.T) { var tests = []struct { description string - ip net.IP + hostname string + port int existing []byte err bool status bool expCfg []byte }{ { - description: "empty ip", - ip: nil, - existing: fakeKubeCfg2, + description: "empty hostname", + hostname: "", + port: 8443, + existing: kubeConfig192, err: true, - expCfg: fakeKubeCfg2, + expCfg: kubeConfig192, }, { description: "no minikube cluster", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg, + hostname: "192.168.10.100", + port: 8080, + existing: kubeConfigWithoutHTTPS, err: true, - expCfg: fakeKubeCfg, + expCfg: kubeConfigWithoutHTTPS, }, { description: "same IP", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg2, - expCfg: fakeKubeCfg2, + hostname: "192.168.10.100", + port: 8443, + existing: kubeConfig192, + expCfg: kubeConfig192, }, { description: "different IP", - ip: net.ParseIP("192.168.10.100"), - existing: fakeKubeCfg3, + hostname: "127.0.0.1", + port: 8443, + existing: kubeConfig192, status: true, - expCfg: fakeKubeCfg2, + expCfg: kubeConfigLocalhost, + }, + { + description: "different port", + hostname: "127.0.0.1", + port: 12345, + existing: kubeConfigLocalhost, + status: true, + expCfg: kubeConfigLocalhost12345, }, } for _, test := range tests { t.Run(test.description, func(t *testing.T) { t.Parallel() configFilename := tempFile(t, test.existing) - statusActual, err := UpdateIP(test.ip, "minikube", configFilename) + statusActual, err := UpdateEndpoint("minikube", test.hostname, test.port, configFilename) if err != nil && !test.err { t.Errorf("Got unexpected error: %v", err) } @@ -336,37 +388,42 @@ func TestNewConfig(t *testing.T) { } } -func Test_extractIP(t *testing.T) { +func Test_Endpoint(t *testing.T) { var tests = []struct { description string cfg []byte - ip net.IP + hostname string + port int err bool }{ { description: "normal IP", - cfg: fakeKubeCfg2, - ip: net.ParseIP("192.168.10.100"), + cfg: kubeConfig192, + hostname: "192.168.10.100", + port: 8443, }, { description: "no minikube cluster", - cfg: fakeKubeCfg, + cfg: kubeConfigWithoutHTTPS, err: true, }, } for _, test := range tests { 
t.Run(test.description, func(t *testing.T) { configFilename := tempFile(t, test.cfg) - ip, err := extractIP("minikube", configFilename) + hostname, port, err := Endpoint("minikube", configFilename) if err != nil && !test.err { t.Errorf("Got unexpected error: %v", err) } if err == nil && test.err { t.Errorf("Expected error but got none: %v", err) } - if !ip.Equal(test.ip) { - t.Errorf("IP returned: %s does not match ip given: %s", ip, test.ip) + if hostname != test.hostname { + t.Errorf("got hostname = %q, want hostname = %q", hostname, test.hostname) + } + if port != test.port { + t.Errorf("got port = %q, want port = %q", port, test.port) } }) } diff --git a/pkg/minikube/kubelet/kubelet.go b/pkg/minikube/kubelet/kubelet.go deleted file mode 100644 index 2adc132681..0000000000 --- a/pkg/minikube/kubelet/kubelet.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubelet - -import ( - "fmt" - "os/exec" - "strings" - "time" - - "github.com/golang/glog" - "github.com/pkg/errors" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/util/retry" -) - -// Stop idempotently stops the kubelet -func Stop(cr command.Runner) error { - glog.Infof("stopping kubelet ...") - stop := func() error { - cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") - if rr, err := cr.RunCmd(cmd); err != nil { - glog.Errorf("temporary error for %q : %v", rr.Command(), err) - } - cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") - rr, err := cr.RunCmd(cmd) - if err != nil { - glog.Errorf("temporary error: for %q : %v", rr.Command(), err) - } - if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { - return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) - } - return nil - } - - if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { - return errors.Wrapf(err, "error stopping kubelet") - } - - return nil -} - -// Start starts the kubelet -func Start(cr command.Runner) error { - glog.Infof("restarting kubelet.service ...") - c := exec.Command("sudo", "systemctl", "start", "kubelet") - if _, err := cr.RunCmd(c); err != nil { - return err - } - return nil -} - -// Restart restarts the kubelet -func Restart(cr command.Runner) error { - glog.Infof("restarting kubelet.service ...") - c := exec.Command("sudo", "systemctl", "restart", "kubelet.service") - if _, err := cr.RunCmd(c); err != nil { - return err - } - return nil -} - -// Check checks on the status of the kubelet -func Check(cr command.Runner) error { - glog.Infof("checking for running kubelet ...") - c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "kubelet") - if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "check kubelet") - } - return nil -} - -// Disable disables the Kubelet -func Disable(cr command.Runner) error { - glog.Infof("disabling kubelet ...") - c := exec.Command("sudo", "systemctl", "disable", 
"kubelet") - if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "disable") - } - return nil -} - -// Enable enables the Kubelet -func Enable(cr command.Runner) error { - glog.Infof("enabling kubelet ...") - c := exec.Command("sudo", "systemctl", "enable", "kubelet") - if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "enable") - } - return nil -} diff --git a/pkg/minikube/localpath/localpath.go b/pkg/minikube/localpath/localpath.go index d9faef5d9a..1ac1172b6a 100644 --- a/pkg/minikube/localpath/localpath.go +++ b/pkg/minikube/localpath/localpath.go @@ -38,13 +38,14 @@ func ConfigFile() string { // MiniPath returns the path to the user's minikube dir func MiniPath() string { - if os.Getenv(MinikubeHome) == "" { + minikubeHomeEnv := os.Getenv(MinikubeHome) + if minikubeHomeEnv == "" { return filepath.Join(homedir.HomeDir(), ".minikube") } - if filepath.Base(os.Getenv(MinikubeHome)) == ".minikube" { - return os.Getenv(MinikubeHome) + if filepath.Base(minikubeHomeEnv) == ".minikube" { + return minikubeHomeEnv } - return filepath.Join(os.Getenv(MinikubeHome), ".minikube") + return filepath.Join(minikubeHomeEnv, ".minikube") } // MakeMiniPath is a utility to calculate a relative path to our directory. @@ -54,6 +55,26 @@ func MakeMiniPath(fileName ...string) string { return filepath.Join(args...) } +// Profile returns the path to a profile +func Profile(name string) string { + return filepath.Join(MiniPath(), "profiles", name) +} + +// ClientCert returns client certificate path, used by kubeconfig +func ClientCert(name string) string { + return filepath.Join(Profile(name), "client.crt") +} + +// ClientKey returns client certificate path, used by kubeconfig +func ClientKey(name string) string { + return filepath.Join(Profile(name), "client.key") +} + +// CACert returns the minikube CA certificate shared between profiles +func CACert() string { + return filepath.Join(MiniPath(), "ca.crt") +} + // MachinePath returns the Minikube machine path of a machine func MachinePath(machine string, miniHome ...string) string { miniPath := MiniPath() diff --git a/pkg/minikube/localpath/localpath_test.go b/pkg/minikube/localpath/localpath_test.go index 173ca5df88..d8e6915a43 100644 --- a/pkg/minikube/localpath/localpath_test.go +++ b/pkg/minikube/localpath/localpath_test.go @@ -17,10 +17,15 @@ limitations under the License. 
package localpath import ( + "fmt" "io/ioutil" "os" + "path/filepath" "runtime" + "strings" "testing" + + "k8s.io/client-go/util/homedir" ) func TestReplaceWinDriveLetterToVolumeName(t *testing.T) { @@ -61,3 +66,95 @@ func TestHasWindowsDriveLetter(t *testing.T) { } } } + +func TestMiniPath(t *testing.T) { + var testCases = []struct { + env, basePath string + }{ + {"/tmp/.minikube", "/tmp/"}, + {"/tmp/", "/tmp"}, + {"", homedir.HomeDir()}, + } + originalEnv := os.Getenv(MinikubeHome) + defer func() { // revert to pre-test env var + err := os.Setenv(MinikubeHome, originalEnv) + if err != nil { + t.Fatalf("Error reverting env %s to its original value (%s) var after test ", MinikubeHome, originalEnv) + } + }() + for _, tc := range testCases { + t.Run(tc.env, func(t *testing.T) { + expectedPath := filepath.Join(tc.basePath, ".minikube") + os.Setenv(MinikubeHome, tc.env) + path := MiniPath() + if path != expectedPath { + t.Errorf("MiniPath expected to return '%s', but got '%s'", expectedPath, path) + } + }) + } +} + +func TestMachinePath(t *testing.T) { + var testCases = []struct { + miniHome []string + contains string + }{ + {[]string{"tmp", "foo", "bar", "baz"}, "tmp"}, + {[]string{"tmp"}, "tmp"}, + {[]string{}, MiniPath()}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s", tc.miniHome), func(t *testing.T) { + machinePath := MachinePath("foo", tc.miniHome...) + if !strings.Contains(machinePath, tc.contains) { + t.Errorf("Function MachinePath returned (%v) which doesn't contain expected (%v)", machinePath, tc.contains) + } + }) + } +} + +type propertyFnWithArg func(string) string + +func TestPropertyWithNameArg(t *testing.T) { + var testCases = []struct { + propertyFunc propertyFnWithArg + name string + }{ + {Profile, "Profile"}, + {ClientCert, "ClientCert"}, + {ClientKey, "ClientKey"}, + } + miniPath := MiniPath() + mockedName := "foo" + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if !strings.Contains(tc.propertyFunc(mockedName), MiniPath()) { + t.Errorf("Property %s(%v) doesn't contain miniPath %v", tc.name, tc.propertyFunc, miniPath) + } + if !strings.Contains(tc.propertyFunc(mockedName), mockedName) { + t.Errorf("Property %s(%v) doesn't contain passed name %v", tc.name, tc.propertyFunc, mockedName) + } + }) + + } +} + +type propertyFnWithoutArg func() string + +func TestPropertyWithoutNameArg(t *testing.T) { + var testCases = []struct { + propertyFunc propertyFnWithoutArg + name string + }{ + {ConfigFile, "ConfigFile"}, + {CACert, "CACert"}, + } + miniPath := MiniPath() + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if !strings.Contains(tc.propertyFunc(), MiniPath()) { + t.Errorf("Property %s(%v) doesn't contain expected miniPath %v", tc.name, tc.propertyFunc, miniPath) + } + }) + } +} diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index ef26e9d7d7..aedf55457d 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -31,12 +31,37 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/out" ) -// rootCauseRe is a regular expression that matches known failure root causes -var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted|tls: bad certificate|kubelet.*no API client|kubelet.*No api server|STDIN.*127.0.0.1:8080|failed 
to create listener|address already in use|unable to evict any pods|eviction manager: unexpected error`) +// rootCauses are regular expressions that match known failures +var rootCauses = []string{ + `^error: `, + `eviction manager: pods.* evicted`, + `unknown flag: --`, + `forbidden.*no providers available`, + `eviction manager:.*evicted`, + `tls: bad certificate`, + `kubelet.*no API client`, + `kubelet.*No api server`, + `STDIN.*127.0.0.1:8080`, + `failed to create listener`, + `address already in use`, + `unable to evict any pods`, + `eviction manager: unexpected error`, + `Resetting AnonymousAuth to false`, + `Unable to register node.*forbidden`, + `Failed to initialize CSINodeInfo.*forbidden`, + `Failed to admit pod`, + `failed to "StartContainer"`, + `kubelet.*forbidden.*cannot \w+ resource`, + `leases.*forbidden.*cannot \w+ resource`, +} + +// rootCauseRe combines rootCauses into a single regex +var rootCauseRe = regexp.MustCompile(strings.Join(rootCauses, "|")) // ignoreCauseRe is a regular expression that matches spurious errors to not surface var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") @@ -44,6 +69,7 @@ var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") // importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs. var importantPods = []string{ "kube-apiserver", + "etcd", "coredns", "kube-scheduler", "kube-proxy", @@ -62,9 +88,9 @@ type logRunner interface { const lookBackwardsCount = 400 // Follow follows logs from multiple files in tail(1) format -func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) error { +func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) error { cs := []string{} - for _, v := range logCommands(r, bs, 0, true) { + for _, v := range logCommands(r, bs, cfg, 0, true) { cs = append(cs, v+" &") } cs = append(cs, "wait") @@ -84,9 +110,9 @@ func IsProblem(line string) bool { } // FindProblems finds possible root causes among the logs -func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) map[string][]string { +func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string { pMap := map[string][]string{} - cmds := logCommands(r, bs, lookBackwardsCount, false) + cmds := logCommands(r, bs, cfg, lookBackwardsCount, false) for name := range cmds { glog.Infof("Gathering logs for %s ...", name) var b bytes.Buffer @@ -117,7 +143,7 @@ func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner // OutputProblems outputs discovered problems. 
func OutputProblems(problems map[string][]string, maxLines int) { for name, lines := range problems { - out.T(out.FailureType, "Problems detected in {{.name}}:", out.V{"name": name}) + out.FailureT("Problems detected in {{.name}}:", out.V{"name": name}) if len(lines) > maxLines { lines = lines[len(lines)-maxLines:] } @@ -128,8 +154,8 @@ func OutputProblems(problems map[string][]string, maxLines int) { } // Output displays logs from multiple sources in tail(1) format -func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner, lines int) error { - cmds := logCommands(r, bs, lines, false) +func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int) error { + cmds := logCommands(r, bs, cfg, lines, false) cmds["kernel"] = "uptime && uname -a && grep PRETTY /etc/os-release" names := []string{} @@ -166,8 +192,8 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run } // logCommands returns a list of commands that would be run to receive the anticipated logs -func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string { - cmds := bs.LogCommands(bootstrapper.LogOptions{Lines: length, Follow: follow}) +func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string { + cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow}) for _, pod := range importantPods { ids, err := r.ListContainers(cruntime.ListOptions{Name: pod}) if err != nil { @@ -186,5 +212,6 @@ func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, f } cmds[r.Name()] = r.SystemLogCmd(length) cmds["container status"] = cruntime.ContainerStatusCommand() + return cmds } diff --git a/pkg/minikube/logs/logs_test.go b/pkg/minikube/logs/logs_test.go index bfb8b14b55..918ba60cfb 100644 --- a/pkg/minikube/logs/logs_test.go +++ b/pkg/minikube/logs/logs_test.go @@ -36,6 +36,19 @@ func TestIsProblem(t *testing.T) { {"no-objects-passed-to-apply #4010", false, "error: no objects passed to apply"}, {"bad-certificate #4251", true, "log.go:172] http: TLS handshake error from 127.0.0.1:49200: remote error: tls: bad certificate"}, {"ephemeral-eviction #5355", true, " eviction_manager.go:419] eviction manager: unexpected error when attempting to reduce ephemeral-storage pressure: wanted to free 9223372036854775807 bytes, but freed 0 bytes space with errors in image deletion"}, + {"anonymous-auth", true, "AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. 
You should use a different authorizer"}, + {"disk-pressure #7073", true, "eviction_manager.go:159] Failed to admit pod kindnet-jpzzf_kube-system(b63b1ee0-0fc6-428f-8e67-e357464f579c) - node has conditions: [DiskPressure]"}, + {"csi timeout", true, `Failed to initialize CSINodeInfo: error updating CSINode annotation: timed out waiting for the condition; caused by: csinodes.storage.k8s.io "m01" is forbidden: User "system:node:m01" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope`}, + {"node registration permissions", true, `Unable to register node "m01" with API server: nodes is forbidden: User "system:node:m01" cannot create resource "nodes" in API group "" at the cluster scope`}, + {"regular kubelet refused", false, `kubelet_node_status.go:92] Unable to register node "m01" with API server: Post https://localhost:8443/api/v1/nodes: dial tcp 127.0.0.1:8443: connect: connection refused`}, + {"regular csi refused", false, `Failed to initialize CSINodeInfo: error updating CSINode annotation: timed out waiting for the condition; caused by: Get https://localhost:8443/apis/storage.k8s.io/v1/csinodes/m01: dial tcp 127.0.0.1:8443: connect: connection refused`}, + {"apiserver crashloop", true, `pod_workers.go:191] Error syncing pod 9f8ee739bd14e8733f807eb2be99768f ("kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)"), skipping: failed to "StartContainer" for "kube-apiserver" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-m01_kube-system(9f8ee739bd14e8733f807eb2be99768f)`}, + {"kubelet node timeout", false, `failed to ensure node lease exists, will retry in 6.4s, error: Get https://localhost:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/m01?timeout=10s: dial tcp 127.0.0.1:8443: connect: connection refused`}, + {"rbac misconfiguration", true, `leases.coordination.k8s.io "m01" is forbidden: User "system:node:m01" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-node-lease"`}, + {"regular controller init", false, `error retrieving resource lock kube-system/kube-controller-manager: endpoints "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "endpoints" in API group "" in the namespace "kube-system"`}, + {"regular scheduler services init", false, ` k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope`}, + {"regular scheduler nodes init", false, `k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope`}, + {"kubelet rbac fail", true, `k8s.io/kubernetes/pkg/kubelet/kubelet.go:526: Failed to list *v1.Node: nodes "m01" is forbidden: User "system:node:m01" cannot list resource "nodes" in API group "" at the cluster scope`}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 34b4379314..93f735467c 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -21,6 +21,7 @@ import ( "os" "path" "path/filepath" + "strings" "sync" "time" @@ -92,7 +93,11 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string for _, image := range images { image := image g.Go(func() error { - err := 
needsTransfer(imgClient, image, cr) + // Put a ten second limit on deciding if an image needs transfer + // because it takes much less than that time to just transfer the image. + // This is needed because if running in offline mode, we can spend minutes here + // waiting for i/o timeout. + err := timedNeedsTransfer(imgClient, image, cr, 10*time.Second) if err == nil { return nil } @@ -107,6 +112,28 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string return nil } +func timedNeedsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager, t time.Duration) error { + timeout := make(chan bool, 1) + go func() { + time.Sleep(t) + timeout <- true + }() + + transferFinished := make(chan bool, 1) + var err error + go func() { + err = needsTransfer(imgClient, imgName, cr) + transferFinished <- true + }() + + select { + case <-transferFinished: + return err + case <-timeout: + return fmt.Errorf("needs transfer timed out in %f seconds", t.Seconds()) + } +} + // needsTransfer returns an error if an image needs to be retransfered func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager) error { imgDgst := "" // for instance sha256:7c92a2c6bbcb6b6beff92d0a940779769c2477b807c202954c537e2e0deb9bed @@ -132,36 +159,51 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager // CacheAndLoadImages caches and loads images to all profiles func CacheAndLoadImages(images []string) error { + // This is the most important thing if err := image.SaveToDir(images, constants.ImageCacheDir); err != nil { - return err + return errors.Wrap(err, "save to dir") } + api, err := NewAPIClient() if err != nil { - return err + return errors.Wrap(err, "api") } defer api.Close() profiles, _, err := config.ListProfiles() // need to load image to all profiles if err != nil { return errors.Wrap(err, "list profiles") } + + succeeded := []string{} + failed := []string{} + for _, p := range profiles { // loading images to all running profiles pName := p.Name // capture the loop variable + c, err := config.Load(pName) if err != nil { - return err + // Non-fatal because it may race with profile deletion + glog.Errorf("Failed to load profile %q: %v", pName, err) + failed = append(failed, pName) + continue } + for _, n := range c.Nodes { m := driver.MachineName(*c, n) + status, err := Status(api, m) if err != nil { - glog.Warningf("skipping loading cache for profile %s", pName) glog.Errorf("error getting status for %s: %v", pName, err) - continue // try next machine + failed = append(failed, pName) + continue } + if status == state.Running.String() { // the not running hosts will load on next start h, err := api.Load(m) if err != nil { - return err + glog.Errorf("Failed to load machine %q: %v", m, err) + failed = append(failed, pName) + continue } cr, err := CommandRunner(h) if err != nil { @@ -169,12 +211,18 @@ func CacheAndLoadImages(images []string) error { } err = LoadImages(c, cr, images, constants.ImageCacheDir) if err != nil { + failed = append(failed, pName) glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. 
%v", pName, err) } + succeeded = append(succeeded, pName) } } } - return err + + glog.Infof("succeeded pushing to: %s", strings.Join(succeeded, " ")) + glog.Infof("failed pushing to: %s", strings.Join(failed, " ")) + // Live pushes are not considered a failure + return nil } // transferAndLoadImage transfers and loads a single image from the cache diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 0121c92358..49f0a901be 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -36,11 +36,11 @@ import ( "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnutils" "github.com/docker/machine/libmachine/persist" - lib_provision "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/ssh" "github.com/docker/machine/libmachine/state" "github.com/docker/machine/libmachine/swarm" "github.com/docker/machine/libmachine/version" + "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/driver" @@ -49,7 +49,6 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/sshutil" - "k8s.io/minikube/pkg/provision" ) // NewRPCClient gets a new client. @@ -158,6 +157,11 @@ func CommandRunner(h *host.Host) (command.Runner, error) { if driver.IsKIC(h.Driver.DriverName()) { return command.NewKICRunner(h.Name, h.Driver.DriverName()), nil } + return SSHRunner(h) +} + +// SSHRunner returns an SSH runner for the host +func SSHRunner(h *host.Host) (command.Runner, error) { client, err := sshutil.NewSSHClient(h.Driver) if err != nil { return nil, errors.Wrap(err, "getting ssh client for bootstrapper") @@ -167,11 +171,18 @@ func CommandRunner(h *host.Host) (command.Runner, error) { // Create creates the host func (api *LocalClient) Create(h *host.Host) error { + glog.Infof("LocalClient.Create starting") + start := time.Now() + defer func() { + glog.Infof("LocalClient.Create took %s", time.Since(start)) + }() + def := registry.Driver(h.DriverName) if def.Empty() { return fmt.Errorf("driver %q does not exist", h.DriverName) } if def.Init == nil { + // NOTE: This will call provision.DetectProvisioner return api.legacyClient.Create(h) } @@ -209,21 +220,17 @@ func (api *LocalClient) Create(h *host.Host) error { { "provisioning", func() error { + // Skippable because we don't reconfigure Docker? if driver.BareMetal(h.Driver.DriverName()) { return nil } - var pv lib_provision.Provisioner - if driver.IsKIC(h.Driver.DriverName()) { - pv = provision.NewUbuntuProvisioner(h.Driver) - } else { - pv = provision.NewBuildrootProvisioner(h.Driver) - } - return pv.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) + return provisionDockerMachine(h) }, }, } for _, step := range steps { + if err := step.f(); err != nil { return errors.Wrap(err, step.name) } diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 0c32c8642a..01c99de92e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -17,11 +17,13 @@ limitations under the License. 
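// Editor's sketch (not part of this patch) of the bounded-check pattern used by
// timedNeedsTransfer in cache_images.go above, written with time.After for
// brevity. The 10s bound and the slowCheck helper are illustrative only.
package main

import (
	"fmt"
	"time"
)

// bounded runs check in a goroutine and gives up once d has elapsed, so an
// offline image lookup cannot stall the caller for minutes.
func bounded(check func() error, d time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- check() }()

	select {
	case err := <-done:
		return err
	case <-time.After(d):
		return fmt.Errorf("check timed out after %s", d)
	}
}

func main() {
	slowCheck := func() error { time.Sleep(50 * time.Millisecond); return nil }
	fmt.Println(bounded(slowCheck, 10*time.Second))
}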
package machine import ( + "flag" "fmt" "testing" "time" // Driver used by testdata + "k8s.io/minikube/pkg/minikube/constants" _ "k8s.io/minikube/pkg/minikube/registry/drvs/virtualbox" "github.com/docker/machine/libmachine/drivers" @@ -41,6 +43,11 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e } func RegisterMockDriver(t *testing.T) { + // Debugging this test is a nightmare. + if err := flag.Lookup("logtostderr").Value.Set("true"); err != nil { + t.Logf("unable to set logtostderr: %v", err) + } + t.Helper() if !registry.Driver(driver.Mock).Empty() { return @@ -108,6 +115,7 @@ func TestCreateHost(t *testing.T) { func TestStartHostExists(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) + // Create an initial host. ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { @@ -129,7 +137,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, err := StartHost(api, mc, n) + h, _, err := StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -139,9 +147,6 @@ func TestStartHostExists(t *testing.T) { if s, _ := h.Driver.GetState(); s != state.Running { t.Fatalf("Machine not started.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartHostErrMachineNotExist(t *testing.T) { @@ -163,9 +168,9 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { - if err != ErrorMachineNotExist { + if err != constants.ErrMachineMissing { t.Fatalf("Error starting host: %v", err) } } @@ -173,8 +178,10 @@ func TestStartHostErrMachineNotExist(t *testing.T) { mc.Name = h.Name n.Name = h.Name + n.Name = h.Name + // Second call. This should pass without calling Create because the host exists already. 
- h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -185,9 +192,6 @@ func TestStartHostErrMachineNotExist(t *testing.T) { if s, _ := h.Driver.GetState(); s != state.Running { t.Fatalf("Machine not started.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartStoppedHost(t *testing.T) { @@ -207,7 +211,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, err = StartHost(api, mc, n) + h, _, err = StartHost(api, mc, n) if err != nil { t.Fatal("Error starting host.") } @@ -223,9 +227,6 @@ func TestStartStoppedHost(t *testing.T) { t.Fatalf("Machine must be saved after starting.") } - if !md.Provisioner.Provisioned { - t.Fatalf("Expected provision to be called") - } } func TestStartHost(t *testing.T) { @@ -235,7 +236,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -269,7 +270,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, cfg, config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -422,16 +423,19 @@ func TestCreateSSHShell(t *testing.T) { t.Fatalf("Error starting ssh server: %v", err) } + m := viper.GetString("profile") + d := &tests.MockDriver{ Port: port, CurrentState: state.Running, BaseDriver: drivers.BaseDriver{ - IPAddress: "127.0.0.1", - SSHKeyPath: "", + IPAddress: "127.0.0.1", + SSHKeyPath: "", + MachineName: m, }, T: t, } - api.Hosts[viper.GetString("profile")] = &host.Host{Driver: d} + api.Hosts[m] = &host.Host{Driver: d} cc := defaultClusterConfig cc.Name = viper.GetString("profile") diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 518f7b3fc6..2132d16737 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -22,11 +22,13 @@ import ( "time" "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" ) @@ -66,28 +68,63 @@ func DeleteHost(api libmachine.API, machineName string) error { // Get the status of the host. Ensure that it exists before proceeding ahead. 
status, err := Status(api, machineName) if err != nil { - // Warn, but proceed - out.WarningT(`Unable to get host status for "{{.name}}": {{.error}}`, out.V{"name": machineName, "error": err}) + // Assume that the host has already been deleted, log and return + glog.Infof("Unable to get host status for %s, assuming it has already been deleted: %v", machineName, err) + return nil } if status == state.None.String() { return mcnerror.ErrHostDoesNotExist{Name: machineName} } - // This is slow if SSH is not responding, but HyperV hangs otherwise, See issue #2914 + // Hyper-V requires special care to avoid ACPI and file locking issues if host.Driver.DriverName() == driver.HyperV { - if err := trySSHPowerOff(host); err != nil { - glog.Infof("Unable to power off minikube because the host was not found.") + if err := StopHost(api, machineName); err != nil { + glog.Warningf("stop host: %v", err) } - out.T(out.DeletingHost, "Successfully powered off Hyper-V. minikube driver -- {{.driver}}", out.V{"driver": host.Driver.DriverName()}) + // Hack: give the Hyper-V VM more time to stop before deletion + time.Sleep(1 * time.Second) } out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if err := host.Driver.Remove(); err != nil { - return errors.Wrap(err, "host remove") + return delete(api, host, machineName) +} + +// delete removes a host and it's local data files +func delete(api libmachine.API, h *host.Host, machineName string) error { + if err := h.Driver.Remove(); err != nil { + glog.Warningf("remove failed, will retry: %v", err) + time.Sleep(1 * time.Second) + + nerr := h.Driver.Remove() + if nerr != nil { + return errors.Wrap(nerr, "host remove retry") + } } + if err := api.Remove(machineName); err != nil { return errors.Wrap(err, "api remove") } return nil } + +// demolish destroys a host by any means necessary - use only if state is inconsistent +func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { + machineName := driver.MachineName(cc, n) + glog.Infof("DEMOLISHING %s ...", machineName) + + // This will probably fail + err := stop(h) + if err != nil { + glog.Infof("stophost failed (probably ok): %v", err) + } + + // For 95% of cases, this should be enough + err = DeleteHost(api, machineName) + if err != nil { + glog.Warningf("deletehost failed: %v", err) + } + + err = delete(api, h, machineName) + glog.Warningf("delete failed (probably ok) %v", err) +} diff --git a/pkg/minikube/machine/filesync_test.go b/pkg/minikube/machine/filesync_test.go index 99a674e8e8..143c3e9ab7 100644 --- a/pkg/minikube/machine/filesync_test.go +++ b/pkg/minikube/machine/filesync_test.go @@ -149,7 +149,7 @@ func TestAssetsFromDir(t *testing.T) { got := make(map[string]string) for _, actualFile := range actualFiles { - got[actualFile.GetAssetName()] = actualFile.GetTargetDir() + got[actualFile.GetSourcePath()] = actualFile.GetTargetDir() } if diff := cmp.Diff(want, got); diff != "" { t.Errorf("files differ: (-want +got)\n%s", diff) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 25f921cd5a..8aa74723e1 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -25,10 +25,8 @@ import ( "time" "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" - "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" 
"github.com/pkg/errors" @@ -36,7 +34,6 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/util/retry" ) // hostRunner is a minimal host.Host based interface for running commands @@ -50,15 +47,8 @@ const ( maxClockDesyncSeconds = 2.1 ) -var ( - // ErrorMachineNotExist is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C) - ErrorMachineNotExist = errors.New("machine does not exist") -) - // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { - out.T(out.Waiting, "Reconfiguring existing host ...") - start := time.Now() glog.Infof("fixHost starting: %s", n.Name) defer func() { @@ -70,23 +60,22 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } + driverName := h.Driver.DriverName() + // check if need to re-run docker-env - maybeWarnAboutEvalEnv(cc.Driver, cc.Name) + maybeWarnAboutEvalEnv(driverName, cc.Name) h, err = recreateIfNeeded(api, cc, n, h) if err != nil { return h, err } - e := engineOptions(cc) - if len(e.Env) > 0 { + // Avoid reprovisioning "none" driver because provision.Detect requires SSH + if !driver.BareMetal(h.Driver.DriverName()) { + e := engineOptions(cc) h.HostOptions.EngineOptions.Env = e.Env - glog.Infof("Detecting provisioner ...") - provisioner, err := provision.DetectProvisioner(h.Driver) + err = provisionDockerMachine(h) if err != nil { - return h, errors.Wrap(err, "detecting provisioner") - } - if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil { return h, errors.Wrap(err, "provision") } } @@ -100,63 +89,63 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. } if driver.BareMetal(h.Driver.DriverName()) { - glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName()) + glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName) return h, nil } - glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName()) - if err := h.ConfigureAuth(); err != nil { - return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")} - } - return h, ensureSyncedGuestClock(h, cc.Driver) + return h, ensureSyncedGuestClock(h, driverName) } func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { - s, err := h.Driver.GetState() - if err != nil || s == state.Stopped || s == state.None { - // If virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C), recreate virtual machine - me, err := machineExists(h.Driver.DriverName(), s, err) - if !me { - // If the error is that virtual machine does not exist error, handle error(recreate virtual machine) - if err == ErrorMachineNotExist { - // remove virtual machine - if err := h.Driver.Remove(); err != nil { - // skip returning error since it may be before docker image pulling(so, no host exist) - if h.Driver.DriverName() != driver.Docker { - return nil, errors.Wrap(err, "host remove") - } - } - // remove machine config directory - if err := api.Remove(cc.Name); err != nil { - return nil, errors.Wrap(err, "api remove") - } - // recreate virtual machine - out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": cc.Name}) - h, err = createHost(api, cc, n) - if err != nil { - return nil, errors.Wrap(err, "Error recreating VM") - } - // return ErrMachineNotExist err to initialize preExists flag - return h, ErrorMachineNotExist - } - // If the error is not that virtual machine does not exist error, return error - return nil, errors.Wrap(err, "Error getting state for host") - } - } - + machineName := driver.MachineName(cc, n) machineType := driver.MachineType(cc.Driver) - if s == state.Running { - out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) - } else { - out.T(out.Restarting, `Starting existing {{.driver_name}} {{.machine_type}} for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) - if err := h.Driver.Start(); err != nil { - return h, errors.Wrap(err, "driver start") - } - if err := api.Save(h); err != nil { - return h, errors.Wrap(err, "save") + recreated := false + s, serr := h.Driver.GetState() + + glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, serr) + if serr != nil || s == state.Stopped || s == state.None { + // If virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C), recreate virtual machine + me, err := machineExists(h.Driver.DriverName(), s, serr) + glog.Infof("exists: %v err=%v", me, err) + glog.Infof("%q vs %q", err, constants.ErrMachineMissing) + + if !me || err == constants.ErrMachineMissing { + out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + demolish(api, cc, n, h) + + glog.Infof("Sleeping 1 second for extra luck!") + time.Sleep(1 * time.Second) + + h, err = createHost(api, cc, n) + if err != nil { + return nil, errors.Wrap(err, "recreate") + } + + recreated = true + s, serr = h.Driver.GetState() } } + if serr != constants.ErrMachineMissing { + glog.Warningf("unexpected machine state, will restart: %v", serr) + } + + if s == state.Running { + if !recreated { + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + } + return h, nil + } + + if !recreated { + out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + } + if err := h.Driver.Start(); err != nil { + return h, errors.Wrap(err, "driver start") + } + if err := api.Save(h); err != nil { + return h, errors.Wrap(err, "save") + } return h, nil } @@ -172,7 +161,7 @@ func maybeWarnAboutEvalEnv(drver string, name string) { } out.T(out.Notice, "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver}) // TODO: refactor docker-env package to generate only eval command per shell. https://github.com/kubernetes/minikube/issues/6887 - out.T(out.Warning, `Please re-eval your docker-env, To ensure your environment variables have updated ports: + out.WarningT(`Please re-eval your docker-env, To ensure your environment variables have updated ports: 'minikube -p {{.profile_name}} docker-env' @@ -233,7 +222,7 @@ func adjustGuestClock(h hostRunner, t time.Time) error { func machineExistsState(s state.State, err error) (bool, error) { if s == state.None { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -242,7 +231,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { _ = s // not used if err == drverr { // if the error matches driver error - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -250,7 +239,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { func machineExistsMessage(s state.State, err error, msg string) (bool, error) { if s == state.None || (err != nil && err.Error() == msg) { // if the error contains the message - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -258,10 +247,10 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) { func machineExistsDocker(s state.State, err error) (bool, error) { if s == state.Error { // if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } else if s == state.None { // if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return - return false, ErrorMachineNotExist + 
return false, constants.ErrMachineMissing } return true, err } @@ -293,7 +282,7 @@ func machineExists(d string, s state.State, err error) (bool, error) { return machineExistsDocker(s, err) case driver.Mock: if s == state.Error { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err default: diff --git a/pkg/minikube/machine/info.go b/pkg/minikube/machine/info.go index 1bae7253e9..c3b4e06569 100644 --- a/pkg/minikube/machine/info.go +++ b/pkg/minikube/machine/info.go @@ -18,13 +18,14 @@ package machine import ( "io/ioutil" + "os/exec" - "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/provision" "github.com/golang/glog" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" "github.com/shirou/gopsutil/mem" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/out" ) @@ -80,18 +81,17 @@ func showLocalOsRelease() { } // logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM -func logRemoteOsRelease(drv drivers.Driver) { - provisioner, err := provision.DetectProvisioner(drv) +func logRemoteOsRelease(r command.Runner) { + rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release")) if err != nil { - glog.Errorf("DetectProvisioner: %v", err) + glog.Infof("remote release failed: %v", err) + } + + osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes()) + if err != nil { + glog.Errorf("NewOsRelease: %v", err) return } - osReleaseInfo, err := provisioner.GetOsReleaseInfo() - if err != nil { - glog.Errorf("GetOsReleaseInfo: %v", err) - return - } - - glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName) + glog.Infof("Remote host: %s", osReleaseInfo.PrettyName) } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 215f240753..26470c3e7a 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -17,8 +17,14 @@ limitations under the License. 
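// Editor's sketch (not part of this patch) of the parsing step performed by
// logRemoteOsRelease in info.go above; the sample /etc/os-release content is
// made up for illustration.
package main

import (
	"fmt"

	"github.com/docker/machine/libmachine/provision"
)

func main() {
	sample := []byte("NAME=Buildroot\nID=buildroot\nPRETTY_NAME=\"Buildroot 2019.02.10\"\n")
	osr, err := provision.NewOsRelease(sample)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println("Remote host:", osr.PrettyName)
}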
package machine import ( + "time" + "github.com/docker/machine/libmachine/host" + libprovision "github.com/docker/machine/libmachine/provision" + "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/provision" ) // Machine contains information about a machine @@ -74,3 +80,31 @@ func LoadMachine(name string) (*Machine, error) { } return &mm, nil } + +// provisionDockerMachine provides fast provisioning of a docker machine +func provisionDockerMachine(h *host.Host) error { + glog.Infof("provisioning docker machine ...") + start := time.Now() + defer func() { + glog.Infof("provisioned docker machine in %s", time.Since(start)) + }() + + p, err := fastDetectProvisioner(h) + if err != nil { + return errors.Wrap(err, "fast detect") + } + return p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions) +} + +// fastDetectProvisioner provides a shortcut for provisioner detection +func fastDetectProvisioner(h *host.Host) (libprovision.Provisioner, error) { + d := h.Driver.DriverName() + switch { + case driver.IsKIC(d): + return provision.NewUbuntuProvisioner(h.Driver), nil + case driver.BareMetal(d): + return libprovision.DetectProvisioner(h.Driver) + default: + return provision.NewBuildrootProvisioner(h.Driver), nil + } +} diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 73c982fa80..e42fc9cf62 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -32,6 +32,7 @@ import ( "github.com/juju/mutex" "github.com/pkg/errors" "github.com/spf13/viper" + "golang.org/x/crypto/ssh" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" @@ -43,6 +44,7 @@ import ( "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util/lock" + "k8s.io/minikube/pkg/util/retry" ) var ( @@ -62,28 +64,32 @@ var ( ) // StartHost starts a host VM. 
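// It returns the created or reused host, whether that machine already existed
// before this call, and any error encountered.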
-func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, bool, error) { + machineName := driver.MachineName(cfg, n) + // Prevent machine-driver boot races, as well as our own certificate race - releaser, err := acquireMachinesLock(cfg.Name) + releaser, err := acquireMachinesLock(machineName) if err != nil { - return nil, errors.Wrap(err, "boot lock") + return nil, false, errors.Wrap(err, "boot lock") } start := time.Now() defer func() { - glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start)) + glog.Infof("releasing machines lock for %q, held for %s", machineName, time.Since(start)) releaser.Release() }() - exists, err := api.Exists(cfg.Name) + exists, err := api.Exists(machineName) if err != nil { - return nil, errors.Wrapf(err, "exists: %s", cfg.Name) + return nil, false, errors.Wrapf(err, "exists: %s", machineName) } if !exists { - glog.Infof("Provisioning new machine with config: %+v", cfg) - return createHost(api, cfg, n) + glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) + h, err := createHost(api, cfg, n) + return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") - return fixHost(api, cfg, n) + h, err := fixHost(api, cfg, n) + return h, exists, err } func engineOptions(cfg config.ClusterConfig) *engine.Options { @@ -98,7 +104,7 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options { } func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { - glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver) + glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) start := time.Now() defer func() { glog.Infof("createHost completed in %s", time.Since(start)) @@ -191,6 +197,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { } glog.Infof("creating required directories: %v", requiredDirectories) + r, err := commandRunner(h) if err != nil { return errors.Wrap(err, "command runner") @@ -205,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { showLocalOsRelease() } if driver.IsVM(mc.Driver) { - logRemoteOsRelease(h.Driver) + logRemoteOsRelease(r) } return syncLocalAssets(r) } @@ -229,11 +236,19 @@ func commandRunner(h *host.Host) (command.Runner, error) { } glog.Infof("Creating SSH client and returning SSHRunner for %q driver", d) - client, err := sshutil.NewSSHClient(h.Driver) - if err != nil { - return nil, errors.Wrap(err, "ssh client") + + // Retry in order to survive an ssh restart, which sometimes happens due to provisioning + var sc *ssh.Client + getSSH := func() (err error) { + sc, err = sshutil.NewSSHClient(h.Driver) + return err } - return command.NewSSHRunner(client), nil + + if err := retry.Expo(getSSH, 250*time.Millisecond, 2*time.Second); err != nil { + return nil, err + } + + return command.NewSSHRunner(sc), nil } // acquireMachinesLock protects against code that is not parallel-safe (libmachine, cert setup) diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 97931a2b00..fafe09e446 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -17,6 +17,8 @@ limitations under the License. 
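commandRunner now wraps SSH client creation in retry.Expo so that an sshd restart triggered by provisioning does not fail the whole start. A self-contained sketch of that backoff shape, with expoRetry standing in for pkg/util/retry.Expo (hypothetical helper, not the real implementation):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// expoRetry: call fn until it succeeds, doubling the sleep between attempts,
// and give up once the next wait would pass the overall deadline.
func expoRetry(fn func() error, initial, maxWait time.Duration) error {
	deadline := time.Now().Add(maxWait)
	wait := initial
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(wait).After(deadline) {
			return fmt.Errorf("timed out: %v", err)
		}
		time.Sleep(wait)
		wait *= 2
	}
}

func main() {
	attempts := 0
	err := expoRetry(func() error {
		attempts++
		if rand.Intn(3) != 0 { // flaky call, e.g. dialing sshd while it restarts
			return fmt.Errorf("connection refused")
		}
		return nil
	}, 100*time.Millisecond, 2*time.Second)
	fmt.Println("attempts:", attempts, "err:", err)
}
```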
package machine import ( + "time" + "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" @@ -30,26 +32,36 @@ import ( // StopHost stops the host VM, saving state to disk. func StopHost(api libmachine.API, machineName string) error { - host, err := api.Load(machineName) + glog.Infof("StopHost: %v", machineName) + h, err := api.Load(machineName) if err != nil { return errors.Wrapf(err, "load") } - out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if host.DriverName == driver.HyperV { + out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": h.DriverName}) + return stop(h) +} + +// stop forcibly stops a host without needing to load +func stop(h *host.Host) error { + start := time.Now() + if h.DriverName == driver.HyperV { glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") - if err := trySSHPowerOff(host); err != nil { + if err := trySSHPowerOff(h); err != nil { return errors.Wrap(err, "ssh power off") } } - if err := host.Stop(); err != nil { - alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) - if ok && alreadyInStateError.State == state.Stopped { + if err := h.Stop(); err != nil { + glog.Infof("stop err: %v", err) + st, ok := err.(mcnerror.ErrHostAlreadyInState) + if ok && st.State == state.Stopped { + glog.Infof("host is already stopped") return nil } - return &retry.RetriableError{Err: errors.Wrapf(err, "Stop: %s", machineName)} + return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } + glog.Infof("stop complete within %s", time.Since(start)) return nil } diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go new file mode 100644 index 0000000000..0910ba35fc --- /dev/null +++ b/pkg/minikube/mustload/mustload.go @@ -0,0 +1,177 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
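The stop() refactor above keeps the same error classification: libmachine reports "already stopped" as a typed error, which is treated as success, while anything else stays retriable. A minimal sketch of that pattern, using the same types the diff imports:

```go
// Treat ErrHostAlreadyInState(Stopped) as a no-op; wrap everything else so
// callers can retry the stop.
if err := h.Stop(); err != nil {
	if st, ok := err.(mcnerror.ErrHostAlreadyInState); ok && st.State == state.Stopped {
		return nil // nothing to do
	}
	return &retry.RetriableError{Err: errors.Wrap(err, "stop")}
}
return nil
```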
+*/ + +// Package mustload loads minikube clusters, exiting with user-friendly messages +package mustload + +import ( + "fmt" + "net" + "os" + + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/docker/machine/libmachine/state" + "github.com/golang/glog" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" +) + +// ClusterController holds all the needed information for a minikube cluster +type ClusterController struct { + Config *config.ClusterConfig + API libmachine.API + CP ControlPlane +} + +// ControlPlane holds all the needed information for the k8s control plane +type ControlPlane struct { + // Host is the libmachine host object + Host *host.Host + // Node is our internal control object + Node *config.Node + // Runner provides command execution + Runner command.Runner + // Hostname is the host-accesible target for the apiserver + Hostname string + // Port is the host-accessible port for the apiserver + Port int + // IP is the host-accessible IP for the control plane + IP net.IP +} + +// Partial is a cmd-friendly way to load a cluster which may or may not be running +func Partial(name string) (libmachine.API, *config.ClusterConfig) { + glog.Infof("Loading cluster: %s", name) + api, err := machine.NewAPIClient() + if err != nil { + exit.WithError("libmachine failed", err) + } + + cc, err := config.Load(name) + if err != nil { + if config.IsNotExist(err) { + out.T(out.Shrug, `There is no local cluster named "{{.cluster}}"`, out.V{"cluster": name}) + exitTip("start", name, exit.Data) + } + exit.WithError("Error getting cluster config", err) + } + + return api, cc +} + +// Running is a cmd-friendly way to load a running cluster +func Running(name string) ClusterController { + api, cc := Partial(name) + + cp, err := config.PrimaryControlPlane(cc) + if err != nil { + exit.WithError("Unable to find control plane", err) + } + + machineName := driver.MachineName(*cc, cp) + hs, err := machine.Status(api, machineName) + if err != nil { + exit.WithError("Unable to get machine status", err) + } + + if hs == state.None.String() { + out.T(out.Shrug, `The control plane node "{{.name}}" does not exist.`, out.V{"name": cp.Name}) + exitTip("start", name, exit.Unavailable) + } + + if hs == state.Stopped.String() { + out.T(out.Shrug, `The control plane node must be running for this command`) + exitTip("start", name, exit.Unavailable) + } + + if hs != state.Running.String() { + out.T(out.Shrug, `The control plane node is not running (state={{.state}})`, out.V{"name": cp.Name, "state": hs}) + exitTip("start", name, exit.Unavailable) + } + + host, err := machine.LoadHost(api, name) + if err != nil { + exit.WithError("Unable to load host", err) + } + + cr, err := machine.CommandRunner(host) + if err != nil { + exit.WithError("Unable to get command runner", err) + } + + hostname, ip, port, err := driver.ControlPaneEndpoint(cc, &cp, host.DriverName) + if err != nil { + exit.WithError("Unable to get forwarded endpoint", err) + } + + return ClusterController{ + API: api, + Config: cc, + CP: ControlPlane{ + Runner: cr, + Host: host, + Node: &cp, + Hostname: hostname, + IP: ip, + Port: port, + }, + } +} + +// Healthy is a cmd-friendly way to load a healthy cluster +func Healthy(name 
string) ClusterController { + co := Running(name) + + as, err := kverify.APIServerStatus(co.CP.Runner, co.CP.Hostname, co.CP.Port) + if err != nil { + out.FailureT(`Unable to get control plane status: {{.error}}`, out.V{"error": err}) + exitTip("delete", name, exit.Unavailable) + } + + if as == state.Paused { + out.T(out.Shrug, `The control plane for "{{.name}}" is paused!`, out.V{"name": name}) + exitTip("unpause", name, exit.Unavailable) + } + + if as != state.Running { + out.T(out.Shrug, `This control plane is not running! (state={{.state}})`, out.V{"state": as.String()}) + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exitTip("start", name, exit.Unavailable) + } + return co +} + +// ExampleCmd Return a minikube command containing the current profile name +func ExampleCmd(cname string, action string) string { + if cname != constants.DefaultClusterName { + return fmt.Sprintf("minikube %s -p %s", action, cname) + } + return fmt.Sprintf("minikube %s", action) +} + +// exitTip returns an action tip and exits +func exitTip(action string, profile string, code int) { + command := ExampleCmd(profile, action) + out.T(out.Workaround, `To fix this, run: "{{.command}}"`, out.V{"command": command}) + os.Exit(code) +} diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 293424fb8d..ffbde22e84 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -21,6 +21,7 @@ import ( "runtime" "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" @@ -35,8 +36,13 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) -// beginCacheKubernetesImages caches images required for kubernetes version in the background -func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion, cRuntime string) { +const ( + cacheImages = "cache-images" + cacheImageConfigKey = "cache" +) + +// BeginCacheKubernetesImages caches images required for kubernetes version in the background +func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) { if download.PreloadExists(k8sVersion, cRuntime) { glog.Info("Caching tarball of preloaded images") err := download.Preload(k8sVersion, cRuntime) @@ -47,7 +53,7 @@ func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err) } - if !viper.GetBool("cache-images") { + if !viper.GetBool(cacheImages) { return } @@ -56,6 +62,7 @@ func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe }) } +// HandleDownloadOnly caches appropariate binaries and images func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) { // If --download-only, complete the remaining downloads and exit. 
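A hypothetical command-side sketch of how the new mustload package is meant to be consumed; the cluster name and the printed fields are illustrative, and the needed imports (fmt, net, os/exec, strconv) are assumed:

```go
// Running exits with a friendly tip ("To fix this, run: ...") if the cluster
// or its control plane is not up, so callers can use the result directly.
co := mustload.Running("minikube")
fmt.Printf("apiserver at https://%s\n", net.JoinHostPort(co.CP.Hostname, strconv.Itoa(co.CP.Port)))

// The runner gives command access to the control-plane machine.
if rr, err := co.CP.Runner.RunCmd(exec.Command("uname", "-r")); err == nil {
	fmt.Print(rr.Stdout.String())
}
```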
if !viper.GetBool("download-only") { @@ -74,7 +81,6 @@ func handleDownloadOnly(cacheGroup, kicGroup *errgroup.Group, k8sVersion string) } out.T(out.Check, "Download complete!") os.Exit(0) - } // CacheKubectlBinary caches the kubectl binary @@ -92,8 +98,9 @@ func doCacheBinaries(k8sVersion string) error { return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) } -// beginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available +// BeginDownloadKicArtifacts downloads the kic image + preload tarball, returns true if preload is available func beginDownloadKicArtifacts(g *errgroup.Group) { + out.T(out.Pulling, "Pulling base image ...") glog.Info("Beginning downloading kic artifacts") g.Go(func() error { glog.Infof("Downloading %s to local daemon", kic.BaseImage) @@ -101,6 +108,7 @@ func beginDownloadKicArtifacts(g *errgroup.Group) { }) } +// WaitDownloadKicArtifacts blocks until the required artifacts for KIC are downloaded. func waitDownloadKicArtifacts(g *errgroup.Group) { if err := g.Wait(); err != nil { glog.Errorln("Error downloading kic artifacts: ", err) @@ -109,7 +117,7 @@ func waitDownloadKicArtifacts(g *errgroup.Group) { glog.Info("Successfully downloaded all kic artifacts") } -// waitCacheRequiredImages blocks until the required images are all cached. +// WaitCacheRequiredImages blocks until the required images are all cached. func waitCacheRequiredImages(g *errgroup.Group) { if !viper.GetBool(cacheImages) { return @@ -132,10 +140,23 @@ func saveImagesToTarFromConfig() error { return image.SaveToDir(images, constants.ImageCacheDir) } +// CacheAndLoadImagesInConfig loads the images currently in the config file +// called by 'start' and 'cache reload' commands. +func CacheAndLoadImagesInConfig() error { + images, err := imagesInConfigFile() + if err != nil { + return errors.Wrap(err, "images") + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) +} + func imagesInConfigFile() ([]string, error) { configFile, err := config.ReadConfig(localpath.ConfigFile()) if err != nil { - return nil, err + return nil, errors.Wrap(err, "read") } if values, ok := configFile[cacheImageConfigKey]; ok { var images []string @@ -146,16 +167,3 @@ func imagesInConfigFile() ([]string, error) { } return []string{}, nil } - -// CacheAndLoadImagesInConfig loads the images currently in the config file -// called by 'start' and 'cache reload' commands. 
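The cache helpers above keep their errgroup shape: downloads are kicked off in the background early, and Wait() only blocks at the point the artifacts are actually needed. A minimal, self-contained illustration of that pattern:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	g.Go(func() error {
		time.Sleep(100 * time.Millisecond) // e.g. downloading a preload tarball
		return nil
	})

	// ... other start-up work continues here ...

	if err := g.Wait(); err != nil {
		fmt.Println("cache failed:", err)
		return
	}
	fmt.Println("all cached artifacts ready")
}
```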
-func CacheAndLoadImagesInConfig() error { - images, err := imagesInConfigFile() - if err != nil { - return err - } - if len(images) == 0 { - return nil - } - return machine.CacheAndLoadImages(images) -} diff --git a/pkg/minikube/node/config.go b/pkg/minikube/node/config.go index b29867f1b6..5b646d24d1 100644 --- a/pkg/minikube/node/config.go +++ b/pkg/minikube/node/config.go @@ -18,167 +18,39 @@ package node import ( "fmt" - "net" "os" "os/exec" "path/filepath" "strconv" + "sync" - "github.com/blang/semver" - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" "github.com/golang/glog" - "github.com/pkg/errors" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/drivers/kic/oci" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" - "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/util/lock" ) -var ( - // DockerEnv contains the environment variables - DockerEnv []string - // DockerOpt contains the option parameters - DockerOpt []string - // ExtraOptions contains extra options (if any) - ExtraOptions config.ExtraOptionSlice - // AddonList contains the list of addons - AddonList []string -) - -// configureRuntimes does what needs to happen to get a runtime going. -func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager { - co := cruntime.Config{ - Type: viper.GetString(containerRuntime), - Runner: runner, ImageRepository: k8s.ImageRepository, - KubernetesVersion: kv, - } - cr, err := cruntime.New(co) - if err != nil { - exit.WithError("Failed runtime", err) - } - - disableOthers := true - if driver.BareMetal(drvName) { - disableOthers = false - } - - // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. - if driver.IsVM(drvName) { - if err := cr.Preload(k8s); err != nil { - switch err.(type) { - case *cruntime.ErrISOFeature: - out.T(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) - default: - glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) - } - - if err := machine.CacheImagesForBootstrapper(k8s.ImageRepository, k8s.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { - exit.WithError("Failed to cache images", err) - } - } - } - - err = cr.Enable(disableOthers) - if err != nil { - exit.WithError("Failed to enable container runtime", err) - } - - return cr -} - func showVersionInfo(k8sVersion string, cr cruntime.Manager) { version, _ := cr.Version() out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version}) - for _, v := range DockerOpt { + for _, v := range config.DockerOpt { out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v}) } - for _, v := range DockerEnv { + for _, v := range config.DockerEnv { out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v}) } } -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, node config.Node) bootstrapper.Bootstrapper { - bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, node) - if err != nil { - exit.WithError("Failed to get bootstrapper", err) - } - for _, eo := range ExtraOptions { - out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) - } - // Loads cached images, generates config files, download binaries - if err := bs.UpdateCluster(cfg); err != nil { - exit.WithError("Failed to update cluster", err) - } - if err := bs.SetupCerts(cfg.KubernetesConfig, node); err != nil { - exit.WithError("Failed to setup certs", err) - } - return bs -} - -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) (*kubeconfig.Settings, error) { - addr, err := apiServerURL(*h, *cc, *n) - if err != nil { - exit.WithError("Failed to get api server URL", err) - } - - kcs := &kubeconfig.Settings{ - ClusterName: clusterName, - ClusterServerAddress: addr, - ClientCertificate: localpath.MakeMiniPath("client.crt"), - ClientKey: localpath.MakeMiniPath("client.key"), - CertificateAuthority: localpath.MakeMiniPath("ca.crt"), - KeepContext: viper.GetBool(keepContext), - EmbedCerts: viper.GetBool(embedCerts), - } - - kcs.SetPath(kubeconfig.PathFromEnv()) - if err := kubeconfig.Update(kcs); err != nil { - return kcs, err - } - return kcs, nil -} - -// apiServerURL returns a URL to end user can reach to the api server -func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { - hostname := "" - port := n.Port - var err error - if driver.IsKIC(h.DriverName) { - // for kic drivers we use 127.0.0.1 instead of node IP, - // because of Docker on MacOs limitations for reaching to container's IP. 
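Both the helper removed here and its replacement in start.go assemble the apiserver endpoint the same way, with net.JoinHostPort handling any IPv6 bracketing. For example:

```go
// For a KIC (docker) node the hostname is 127.0.0.1 and the port is the
// forwarded host port; for VMs it is the node IP and the apiserver port.
hostname, port := "127.0.0.1", 32768 // illustrative values
url := fmt.Sprintf("https://%s", net.JoinHostPort(hostname, strconv.Itoa(port)))
fmt.Println(url) // https://127.0.0.1:32768
```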
- hostname = oci.DefaultBindIPV4 - port, err = oci.ForwardedPort(h.DriverName, h.Name, port) - if err != nil { - return "", errors.Wrap(err, "host port binding") - } - } else { - hostname, err = h.Driver.GetIP() - if err != nil { - return "", errors.Wrap(err, "get ip") - } - } - - if cc.KubernetesConfig.APIServerName != constants.APIServerName { - hostname = cc.KubernetesConfig.APIServerName - } - return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil -} - // configureMounts configures any requested filesystem mounts -func configureMounts() { +func configureMounts(wg *sync.WaitGroup) { + wg.Add(1) + defer wg.Done() + if !viper.GetBool(createMount) { return } diff --git a/pkg/minikube/node/machine.go b/pkg/minikube/node/machine.go deleted file mode 100644 index 483131515a..0000000000 --- a/pkg/minikube/node/machine.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "fmt" - "net" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/host" - "github.com/golang/glog" - "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/command" - "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" - "k8s.io/minikube/pkg/minikube/exit" - "k8s.io/minikube/pkg/minikube/machine" - "k8s.io/minikube/pkg/minikube/out" - "k8s.io/minikube/pkg/minikube/proxy" - "k8s.io/minikube/pkg/util/retry" -) - -func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) { - m, err := machine.NewAPIClient() - if err != nil { - exit.WithError("Failed to get machine client", err) - } - host, preExists = startHost(m, *cfg, *node) - runner, err = machine.CommandRunner(host) - if err != nil { - exit.WithError("Failed to get command runner", err) - } - - ip := validateNetwork(host, runner) - - // Bypass proxy for minikube's vm host ip - err = proxy.ExcludeIP(ip) - if err != nil { - out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) - } - // Save IP to configuration file for subsequent use - node.IP = ip - - if err := Save(cfg, node); err != nil { - exit.WithError("Failed to save config", err) - } - - return runner, preExists, m, host -} - -// startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, bool) { - exists, err := api.Exists(mc.Name) - if err != nil { - exit.WithError("Failed to check if machine exists", err) - } - - host, err := machine.StartHost(api, mc, n) - if err != nil { - exit.WithError("Unable to start VM. 
Please investigate and run 'minikube delete' if possible", err) - } - return host, exists -} - -// validateNetwork tries to catch network problems as soon as possible -func validateNetwork(h *host.Host, r command.Runner) string { - ip, err := h.Driver.GetIP() - if err != nil { - exit.WithError("Unable to get VM IP address", err) - } - - optSeen := false - warnedOnce := false - for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { - if !optSeen { - out.T(out.Internet, "Found network options:") - optSeen = true - } - out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) - ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY - k = strings.ToUpper(k) // for http_proxy & https_proxy - if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { - out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) - warnedOnce = true - } - } - } - - if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { - trySSH(h, ip) - } - - tryLookup(r) - tryRegistry(r) - return ip -} - -func trySSH(h *host.Host, ip string) { - if viper.GetBool("force") { - return - } - - sshAddr := net.JoinHostPort(ip, "22") - - dial := func() (err error) { - d := net.Dialer{Timeout: 3 * time.Second} - conn, err := d.Dial("tcp", sshAddr) - if err != nil { - out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err}) - return err - } - _ = conn.Close() - return nil - } - - if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil { - exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}} - - This is likely due to one of two reasons: - - - VPN or firewall interference - - {{.hypervisor}} network configuration issue - - Suggested workarounds: - - - Disable your local VPN or firewall software - - Configure your local VPN or firewall to allow access to {{.ip}} - - Restart or reinstall {{.hypervisor}} - - Use an alternative --driver - - Use --force to override this connectivity check - `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) - } -} - -func tryLookup(r command.Runner) { - // DNS check - if rr, err := r.RunCmd(exec.Command("nslookup", "-type=ns", "kubernetes.io")); err != nil { - glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err) - // will try with without query type for ISOs with different busybox versions. - if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil { - glog.Warningf("nslookup failed: %v", err) - // try with the older "host" command, instead of the newer "nslookup" - if _, err = r.RunCmd(exec.Command("host", "kubernetes.io")); err != nil { - out.WarningT("Node may be unable to resolve external DNS records") - } - } - } -} -func tryRegistry(r command.Runner) { - // Try an HTTPS connection to the image repository - proxy := os.Getenv("HTTPS_PROXY") - opts := []string{"-sS"} - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) 
- } - - repo := viper.GetString(imageRepository) - if repo == "" { - repo = images.DefaultKubernetesRepo - } - - opts = append(opts, fmt.Sprintf("https://%s/", repo)) - if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { - glog.Warningf("%s failed: %v", rr.Args, err) - out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo}) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index e92bad65b5..dcc4f4d7d5 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -17,63 +17,51 @@ limitations under the License. package node import ( - "errors" + "fmt" + "github.com/pkg/errors" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/machine" ) +// TODO: Share these between cluster and node packages const ( - imageRepository = "image-repository" - cacheImages = "cache-images" - waitUntilHealthy = "wait" - cacheImageConfigKey = "cache" - containerRuntime = "container-runtime" - embedCerts = "embed-certs" - keepContext = "keep-context" - mountString = "mount-string" - createMount = "mount" - waitTimeout = "wait-timeout" + mountString = "mount-string" + createMount = "mount" ) // Add adds a new node config to an existing cluster. -func Add(cc *config.ClusterConfig, name string, controlPlane bool, worker bool, k8sVersion string, profileName string) (*config.Node, error) { - n := config.Node{ - Name: name, - Worker: true, +func Add(cc *config.ClusterConfig, n config.Node) error { + if err := config.SaveNode(cc, &n); err != nil { + return errors.Wrap(err, "save node") } - if controlPlane { - n.ControlPlane = true - } - - if worker { - n.Worker = true - } - - if k8sVersion != "" { - n.KubernetesVersion = k8sVersion - } else { - n.KubernetesVersion = cc.KubernetesConfig.KubernetesVersion - } - - cc.Nodes = append(cc.Nodes, n) - err := config.SaveProfile(profileName, cc) + r, p, m, h, err := Provision(cc, &n, false) if err != nil { - return nil, err + return err + } + s := Starter{ + Runner: r, + PreExists: p, + MachineAPI: m, + Host: h, + Cfg: cc, + Node: &n, + ExistingAddons: nil, } - _, err = Start(*cc, n, false, nil) - return &n, err + _, err = Start(s, false) + return err } // Delete stops and deletes the given node from the given cluster func Delete(cc config.ClusterConfig, name string) error { n, index, err := Retrieve(&cc, name) if err != nil { - return err + return errors.Wrap(err, "retrieve") } api, err := machine.NewAPIClient() @@ -117,3 +105,8 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { } return config.SaveProfile(viper.GetString(config.ProfileName), cfg) } + +// Name returns the appropriate name for the node given the current number of nodes +func Name(index int) string { + return fmt.Sprintf("m%02d", index) +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a3c5eee92b..b469ba6e16 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -17,114 +17,491 @@ limitations under the License. 
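node.Add now routes through the same Provision/Start pair as the primary node. A hedged sketch of that flow from a caller's perspective, using the signatures shown in this diff (variable names are illustrative):

```go
// Worker nodes are named m02, m03, ... via the new Name helper.
n := config.Node{
	Name:              node.Name(2), // "m02"
	Worker:            true,
	KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
}
if err := config.SaveNode(cc, &n); err != nil {
	return errors.Wrap(err, "save node")
}

// Provision brings the machine/container up; Start joins it to the cluster.
r, preExists, mAPI, h, err := node.Provision(cc, &n, false)
if err != nil {
	return err
}
_, err = node.Start(node.Starter{
	Runner:     r,
	PreExists:  preExists,
	MachineAPI: mAPI,
	Host:       h,
	Cfg:        cc,
	Node:       &n,
}, false) // false: this node does not run the apiserver
return err
```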
package node import ( + "fmt" + "net" "os" + "os/exec" + "runtime/debug" + "strconv" + "strings" + "sync" + "time" + "github.com/blang/semver" + "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" + "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/viper" "golang.org/x/sync/errgroup" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/addons" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" + "k8s.io/minikube/pkg/minikube/bootstrapper/images" + "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/logs" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/proxy" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" ) +const waitTimeout = "wait-timeout" + +var ( + kicGroup errgroup.Group + cacheGroup errgroup.Group +) + +// Starter is a struct with all the necessary information to start a node +type Starter struct { + Runner command.Runner + PreExists bool + MachineAPI libmachine.API + Host *host.Host + Cfg *config.ClusterConfig + Node *config.Node + ExistingAddons map[string]bool +} + // Start spins up a guest and starts the kubernetes node. -func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons map[string]bool) (*kubeconfig.Settings, error) { - k8sVersion := mc.KubernetesConfig.KubernetesVersion - driverName := mc.Driver - - // If using kic, make sure we download the kic base image - var kicGroup errgroup.Group - if driver.IsKIC(driverName) { - beginDownloadKicArtifacts(&kicGroup) - } - - var cacheGroup errgroup.Group - // Adding a second layer of cache does not make sense for the none driver - if !driver.BareMetal(driverName) { - beginCacheKubernetesImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion, mc.KubernetesConfig.ContainerRuntime) - } - - // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. - // Hence, saveProfile must be called before startHost, and again afterwards when we know the IP. - if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil { - exit.WithError("Failed to save config", err) - } - - // exits here in case of --download-only option. 
- handleDownloadOnly(&cacheGroup, &kicGroup, k8sVersion) - waitDownloadKicArtifacts(&kicGroup) - - mRunner, preExists, machineAPI, host := startMachine(&mc, &n) - defer machineAPI.Close() - +func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) - sv, err := util.ParseKubernetesVersion(mc.KubernetesConfig.KubernetesVersion) + sv, err := util.ParseKubernetesVersion(starter.Node.KubernetesVersion) if err != nil { - return nil, err + return nil, errors.Wrap(err, "Failed to parse kubernetes version") } // configure the runtime (docker, containerd, crio) - cr := configureRuntimes(mRunner, driverName, mc.KubernetesConfig, sv) - showVersionInfo(k8sVersion, cr) + cr := configureRuntimes(starter.Runner, *starter.Cfg, sv) + showVersionInfo(starter.Node.KubernetesVersion, cr) - //TODO(sharifelgamal): Part out the cluster-wide operations, perhaps using the "primary" param - - // Must be written before bootstrap, otherwise health checks may flake due to stale IP - kubeconfig, err := setupKubeconfig(host, &mc, &n, mc.Name) - if err != nil { - exit.WithError("Failed to setup kubeconfig", err) - } - - // setup kubeadm (must come after setupKubeconfig) - bs := setupKubeAdm(machineAPI, mc, n) - - // pull images or restart cluster - out.T(out.Launch, "Launching Kubernetes ... ") - if err := bs.StartCluster(mc); err != nil { - exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner)) - } - configureMounts() - - // enable addons, both old and new! - if existingAddons != nil { - addons.Start(viper.GetString(config.ProfileName), existingAddons, AddonList) - } - - if err = CacheAndLoadImagesInConfig(); err != nil { - out.T(out.FailureType, "Unable to load cached images from config file.") - } - - // special ops for none , like change minikube directory. 
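Start parses the node's Kubernetes version into a semver.Version before configuring the runtime, then builds the cruntime config from the cluster config rather than from viper flags. Roughly, with the ParseKubernetesVersion body paraphrased and the cruntime.Config fields taken from the diff:

```go
// Parse "v1.18.0" -> semver 1.18.0, then select/configure the runtime.
sv, err := semver.Make(strings.TrimPrefix("v1.18.0", "v"))
if err != nil {
	return nil, errors.Wrap(err, "Failed to parse kubernetes version")
}
cr, err := cruntime.New(cruntime.Config{
	Type:              cc.KubernetesConfig.ContainerRuntime, // docker, containerd or crio
	Runner:            starter.Runner,
	ImageRepository:   cc.KubernetesConfig.ImageRepository,
	KubernetesVersion: sv,
})
```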
- if driverName == driver.None { - prepareNone() - } - - // Skip pre-existing, because we already waited for health - if viper.GetBool(waitUntilHealthy) && !preExists { - if err := bs.WaitForCluster(mc, viper.GetDuration(waitTimeout)); err != nil { - exit.WithError("Wait failed", err) + // ssh should be set up by now + // switch to using ssh runner since it is faster + if driver.IsKIC(starter.Cfg.Driver) { + sshRunner, err := machine.SSHRunner(starter.Host) + if err != nil { + glog.Infof("error getting ssh runner: %v", err) + } else { + glog.Infof("Using ssh runner for kic...") + starter.Runner = sshRunner } } - return kubeconfig, nil + var bs bootstrapper.Bootstrapper + var kcs *kubeconfig.Settings + if apiServer { + // Must be written before bootstrap, otherwise health checks may flake due to stale IP + kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name) + if err != nil { + return nil, errors.Wrap(err, "Failed to setup kubeconfig") + } + + // setup kubeadm (must come after setupKubeconfig) + bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node) + err = bs.StartCluster(*starter.Cfg) + + if err != nil { + out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner)) + return nil, err + } + + // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper + if err := kubeconfig.Update(kcs); err != nil { + return nil, errors.Wrap(err, "Failed to update kubeconfig file.") + } + } else { + bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, *starter.Node) + if err != nil { + return nil, errors.Wrap(err, "Failed to get bootstrapper") + } + + if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil { + return nil, errors.Wrap(err, "setting up certs") + } + } + + var wg sync.WaitGroup + go configureMounts(&wg) + + wg.Add(1) + go func() { + if err := CacheAndLoadImagesInConfig(); err != nil { + out.FailureT("Unable to push cached images: {{error}}", out.V{"error": err}) + } + wg.Done() + }() + + // enable addons, both old and new! + if starter.ExistingAddons != nil { + go addons.Start(&wg, starter.Cfg, starter.ExistingAddons, config.AddonList) + } + + if apiServer { + // special ops for none , like change minikube directory. 
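Post-start chores (configuring mounts, pushing cached images, enabling addons) now run concurrently and are joined with a single WaitGroup before the profile config is written back. A self-contained sketch of that coordination, with placeholder task bodies:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	for _, task := range []string{"configure mounts", "load cached images", "enable addons"} {
		wg.Add(1) // register before launching, so Wait cannot race the Add
		go func(name string) {
			defer wg.Done()
			time.Sleep(50 * time.Millisecond) // placeholder for the real work
			fmt.Println(name, "done")
		}(task)
	}
	wg.Wait() // Start returns only after all background chores finish
	fmt.Println("writing profile config")
}
```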
+ // multinode super doesn't work on the none driver + if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 { + prepareNone() + } + + // Skip pre-existing, because we already waited for health + if kverify.ShouldWait(starter.Cfg.VerifyComponents) && !starter.PreExists { + if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { + return nil, errors.Wrap(err, "Wait failed") + } + } + } else { + if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil { + return nil, errors.Wrap(err, "Updating node") + } + + cp, err := config.PrimaryControlPlane(starter.Cfg) + if err != nil { + return nil, errors.Wrap(err, "Getting primary control plane") + } + cpBs, err := cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, cp) + if err != nil { + return nil, errors.Wrap(err, "Getting bootstrapper") + } + + joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + if err != nil { + return nil, errors.Wrap(err, "generating join token") + } + + if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { + return nil, errors.Wrap(err, "joining cluster") + } + } + + wg.Wait() + + // Write enabled addons to the config before completion + return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg) +} + +// Provision provisions the machine/container for the node +func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool) (command.Runner, bool, libmachine.API, *host.Host, error) { + + name := driver.MachineName(*cc, *n) + if apiServer { + out.T(out.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } else { + out.T(out.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } + + if driver.IsKIC(cc.Driver) { + beginDownloadKicArtifacts(&kicGroup) + } + + if !driver.BareMetal(cc.Driver) { + beginCacheKubernetesImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime) + } + + // Abstraction leakage alert: startHost requires the config to be saved, to satistfy pkg/provision/buildroot. + // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP. + if err := config.SaveProfile(viper.GetString(config.ProfileName), cc); err != nil { + return nil, false, nil, nil, errors.Wrap(err, "Failed to save config") + } + + handleDownloadOnly(&cacheGroup, &kicGroup, n.KubernetesVersion) + waitDownloadKicArtifacts(&kicGroup) + + return startMachine(cc, n) + +} + +// ConfigureRuntimes does what needs to happen to get a runtime going. +func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, kv semver.Version) cruntime.Manager { + co := cruntime.Config{ + Type: cc.KubernetesConfig.ContainerRuntime, + Runner: runner, + ImageRepository: cc.KubernetesConfig.ImageRepository, + KubernetesVersion: kv, + } + cr, err := cruntime.New(co) + if err != nil { + exit.WithError("Failed runtime", err) + } + + disableOthers := true + if driver.BareMetal(cc.Driver) { + disableOthers = false + } + + // Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere. + if driver.IsVM(cc.Driver) { + if err := cr.Preload(cc.KubernetesConfig); err != nil { + switch err.(type) { + case *cruntime.ErrISOFeature: + out.ErrT(out.Tip, "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'", out.V{"error": err}) + default: + glog.Warningf("%s preload failed: %v, falling back to caching images", cr.Name(), err) + } + + if err := machine.CacheImagesForBootstrapper(cc.KubernetesConfig.ImageRepository, cc.KubernetesConfig.KubernetesVersion, viper.GetString(cmdcfg.Bootstrapper)); err != nil { + exit.WithError("Failed to cache images", err) + } + } + } + + err = cr.Enable(disableOthers) + if err != nil { + debug.PrintStack() + exit.WithError("Failed to enable container runtime", err) + } + + return cr +} + +// setupKubeAdm adds any requested files into the VM before Kubernetes is started +func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node) bootstrapper.Bootstrapper { + bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, n) + if err != nil { + exit.WithError("Failed to get bootstrapper", err) + } + for _, eo := range config.ExtraOptions { + out.T(out.Option, "{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) + } + // Loads cached images, generates config files, download binaries + // update cluster and set up certs in parallel + var parallel sync.WaitGroup + parallel.Add(2) + go func() { + if err := bs.UpdateCluster(cfg); err != nil { + exit.WithError("Failed to update cluster", err) + } + parallel.Done() + }() + + go func() { + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + exit.WithError("Failed to setup certs", err) + } + parallel.Done() + }() + + parallel.Wait() + return bs +} + +func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) *kubeconfig.Settings { + addr, err := apiServerURL(*h, *cc, *n) + if err != nil { + exit.WithError("Failed to get API Server URL", err) + } + + if cc.KubernetesConfig.APIServerName != constants.APIServerName { + addr = strings.Replace(addr, n.IP, cc.KubernetesConfig.APIServerName, -1) + } + kcs := &kubeconfig.Settings{ + ClusterName: clusterName, + ClusterServerAddress: addr, + ClientCertificate: localpath.ClientCert(cc.Name), + ClientKey: localpath.ClientKey(cc.Name), + CertificateAuthority: localpath.CACert(), + KeepContext: cc.KeepContext, + EmbedCerts: cc.EmbedCerts, + } + + kcs.SetPath(kubeconfig.PathFromEnv()) + return kcs +} + +func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { + hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, h.DriverName) + if err != nil { + return "", err + } + return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil +} + +// StartMachine starts a VM +func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) { + m, err := machine.NewAPIClient() + if err != nil { + return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client") + } + host, preExists, err = startHost(m, *cfg, *node) + if err != nil { + return runner, preExists, m, host, errors.Wrap(err, "Failed to start host") + } + runner, err = machine.CommandRunner(host) + if err != nil { + return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner") + } + + ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository) + if err != nil { + return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network") + } + + // Bypass proxy for minikube's vm host ip + err = proxy.ExcludeIP(ip) + 
if err != nil { + out.FailureT("Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) + } + + // Save IP to config file for subsequent use + node.IP = ip + err = config.SaveNode(cfg, node) + if err != nil { + return runner, preExists, m, host, errors.Wrap(err, "saving node") + } + + return runner, preExists, m, host, err +} + +// startHost starts a new minikube host using a VM or None +func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool, error) { + host, exists, err := machine.StartHost(api, cc, n) + if err == nil { + return host, exists, nil + } + out.ErrT(out.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) + + // NOTE: People get very cranky if you delete their prexisting VM. Only delete new ones. + if !exists { + err := machine.DeleteHost(api, driver.MachineName(cc, n)) + if err != nil { + glog.Warningf("delete host: %v", err) + } + } + + // Try again, but just once to avoid making the logs overly confusing + time.Sleep(5 * time.Second) + + host, exists, err = machine.StartHost(api, cc, n) + if err == nil { + return host, exists, nil + } + + // Don't use host.Driver to avoid nil pointer deref + drv := cc.Driver + out.ErrT(out.Sad, `Failed to start {{.driver}} {{.driver_type}}. "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "start"), "error": err}) + return host, exists, err +} + +// validateNetwork tries to catch network problems as soon as possible +func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (string, error) { + ip, err := h.Driver.GetIP() + if err != nil { + return ip, err + } + + optSeen := false + warnedOnce := false + for _, k := range proxy.EnvVars { + if v := os.Getenv(k); v != "" { + if !optSeen { + out.T(out.Internet, "Found network options:") + optSeen = true + } + out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v}) + ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY + k = strings.ToUpper(k) // for http_proxy & https_proxy + if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce { + out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"}) + warnedOnce = true + } + } + } + + if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) { + if err := trySSH(h, ip); err != nil { + return ip, err + } + } + + // Non-blocking + go tryRegistry(r, h.Driver.DriverName(), imageRepository) + return ip, nil +} + +func trySSH(h *host.Host, ip string) error { + if viper.GetBool("force") { + return nil + } + + sshAddr := net.JoinHostPort(ip, "22") + + dial := func() (err error) { + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", sshAddr) + if err != nil { + out.WarningT("Unable to verify SSH connectivity: {{.error}}. 
Will retry...", out.V{"error": err}) + return err + } + _ = conn.Close() + return nil + } + + err := retry.Expo(dial, time.Second, 13*time.Second) + if err != nil { + out.ErrT(out.FailureType, `minikube is unable to connect to the VM: {{.error}} + + This is likely due to one of two reasons: + + - VPN or firewall interference + - {{.hypervisor}} network configuration issue + + Suggested workarounds: + + - Disable your local VPN or firewall software + - Configure your local VPN or firewall to allow access to {{.ip}} + - Restart or reinstall {{.hypervisor}} + - Use an alternative --vm-driver + - Use --force to override this connectivity check + `, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip}) + } + + return err +} + +// tryRegistry tries to connect to the image repository +func tryRegistry(r command.Runner, driverName string, imageRepository string) { + // 2 second timeout. For best results, call tryRegistry in a non-blocking manner. + opts := []string{"-sS", "-m", "2"} + + proxy := os.Getenv("HTTPS_PROXY") + if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { + opts = append([]string{"-x", proxy}, opts...) + } + + if imageRepository == "" { + imageRepository = images.DefaultKubernetesRepo + } + + opts = append(opts, fmt.Sprintf("https://%s/", imageRepository)) + if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil { + glog.Warningf("%s failed: %v", rr.Args, err) + out.WarningT("This {{.type}} is having trouble accessing https://{{.repository}}", out.V{"repository": imageRepository, "type": driver.MachineType(driverName)}) + out.ErrT(out.Tip, "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/") + } } // prepareNone prepares the user and host for the joy of the "none" driver func prepareNone() { out.T(out.StartingNone, "Configuring local host environment ...") if viper.GetBool(config.WantNoneDriverWarning) { - out.T(out.Empty, "") - out.WarningT("The 'none' driver provides limited isolation and may reduce system security and reliability.") - out.WarningT("For more information, see:") - out.T(out.URL, "https://minikube.sigs.k8s.io/docs/reference/drivers/none/") - out.T(out.Empty, "") + out.ErrT(out.Empty, "") + out.WarningT("The 'none' driver is designed for experts who need to integrate with an existing VM") + out.ErrT(out.Tip, "Most users should use the newer 'docker' driver instead, which does not require root!") + out.ErrT(out.Documentation, "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/") + out.ErrT(out.Empty, "") } if os.Getenv("CHANGE_MINIKUBE_NONE_USER") == "" { @@ -132,12 +509,12 @@ func prepareNone() { out.WarningT("kubectl and minikube configuration will be stored in {{.home_folder}}", out.V{"home_folder": home}) out.WarningT("To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:") - out.T(out.Empty, "") - out.T(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) - out.T(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") - out.T(out.Empty, "") + out.ErrT(out.Empty, "") + out.ErrT(out.Command, "sudo mv {{.home_folder}}/.kube {{.home_folder}}/.minikube $HOME", out.V{"home_folder": home}) + out.ErrT(out.Command, "sudo chown -R $USER $HOME/.kube $HOME/.minikube") + out.ErrT(out.Empty, "") - out.T(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") + out.ErrT(out.Tip, "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true") } if err := util.MaybeChownDirRecursiveToMinikubeUser(localpath.MiniPath()); err != nil { diff --git a/pkg/minikube/out/out.go b/pkg/minikube/out/out.go index 9ecb23c053..618b5fd36e 100644 --- a/pkg/minikube/out/out.go +++ b/pkg/minikube/out/out.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" isatty "github.com/mattn/go-isatty" + "k8s.io/minikube/pkg/minikube/translate" ) // By design, this package uses global references to language and output objects, in preference @@ -51,6 +52,9 @@ var ( OverrideEnv = "MINIKUBE_IN_STYLE" ) +// MaxLogEntries controls the number of log entries to show for each source +const MaxLogEntries = 3 + // fdWriter is the subset of file.File that implements io.Writer and Fd() type fdWriter interface { io.Writer @@ -62,12 +66,15 @@ type V map[string]interface{} // T writes a stylized and templated message to stdout func T(style StyleEnum, format string, a ...V) { - outStyled := applyTemplateFormatting(style, useColor, format, a...) + outStyled := ApplyTemplateFormatting(style, useColor, format, a...) String(outStyled) } // String writes a basic formatted string to stdout func String(format string, a ...interface{}) { + // Flush log buffer so that output order makes sense + glog.Flush() + if outFile == nil { glog.Warningf("[unset outFile]: %s", fmt.Sprintf(format, a...)) return @@ -85,7 +92,7 @@ func Ln(format string, a ...interface{}) { // ErrT writes a stylized and templated error message to stderr func ErrT(style StyleEnum, format string, a ...V) { - errStyled := applyTemplateFormatting(style, useColor, format, a...) + errStyled := ApplyTemplateFormatting(style, useColor, format, a...) Err(errStyled) } @@ -172,3 +179,29 @@ func wantsColor(fd uintptr) bool { glog.Infof("isatty.IsTerminal(%d) = %v\n", fd, isT) return isT } + +// LogEntries outputs an error along with any important log entries. +func LogEntries(msg string, err error, entries map[string][]string) { + DisplayError(msg, err) + + for name, lines := range entries { + T(FailureType, "Problems detected in {{.entry}}:", V{"entry": name}) + if len(lines) > MaxLogEntries { + lines = lines[:MaxLogEntries] + } + for _, l := range lines { + T(LogEntry, l) + } + } +} + +// DisplayError prints the error and displays the standard minikube error messaging +func DisplayError(msg string, err error) { + // use Warning because Error will display a duplicate message to stderr + glog.Warningf(fmt.Sprintf("%s: %v", msg, err)) + ErrT(Empty, "") + FatalT("{{.msg}}: {{.err}}", V{"msg": translate.T(msg), "err": err}) + ErrT(Empty, "") + ErrT(Sad, "minikube is exiting due to an error. 
If the above message is not useful, open an issue:") + ErrT(URL, "https://github.com/kubernetes/minikube/issues/new/choose") +} diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 8154f46980..1cd400d2ae 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -60,7 +60,6 @@ var styles = map[StyleEnum]style{ Running: {Prefix: "🏃 "}, Provisioning: {Prefix: "🌱 "}, Restarting: {Prefix: "🔄 "}, - Reconfiguring: {Prefix: "📯 "}, Stopping: {Prefix: "✋ "}, Stopped: {Prefix: "🛑 "}, Warning: {Prefix: "❗ ", LowPrefix: lowWarning}, @@ -69,6 +68,7 @@ var styles = map[StyleEnum]style{ Launch: {Prefix: "🚀 "}, Sad: {Prefix: "😿 "}, ThumbsUp: {Prefix: "👍 "}, + ThumbsDown: {Prefix: "👎 "}, Option: {Prefix: " ▪ ", LowPrefix: lowIndent}, // Indented bullet Command: {Prefix: " ▪ ", LowPrefix: lowIndent}, // Indented bullet LogEntry: {Prefix: " "}, // Indent @@ -83,6 +83,8 @@ var styles = map[StyleEnum]style{ Sparkle: {Prefix: "✨ "}, Pause: {Prefix: "⏸️ "}, Unpause: {Prefix: "⏯️ "}, + Confused: {Prefix: "😕 "}, + Shrug: {Prefix: "🤷 "}, // Specialized purpose styles ISODownload: {Prefix: "💿 "}, @@ -90,7 +92,7 @@ var styles = map[StyleEnum]style{ Caching: {Prefix: "🤹 "}, StartingVM: {Prefix: "🔥 "}, StartingNone: {Prefix: "🤹 "}, - Provisioner: {Prefix: "ℹ️ "}, + Provisioner: {Prefix: "ℹ️ "}, Resetting: {Prefix: "🔄 "}, DeletingHost: {Prefix: "🔥 "}, Copying: {Prefix: "✨ "}, @@ -115,7 +117,7 @@ var styles = map[StyleEnum]style{ Unmount: {Prefix: "🔥 "}, MountOptions: {Prefix: "💾 "}, Fileserver: {Prefix: "🚀 ", OmitNewline: true}, - DryRun: {Prefix: "🏜️ "}, + DryRun: {Prefix: "🌵 "}, AddonEnable: {Prefix: "🌟 "}, AddonDisable: {Prefix: "🌑 "}, } @@ -160,7 +162,8 @@ func applyStyle(style StyleEnum, useColor bool, format string) string { return applyPrefix(s.Prefix, format) } -func applyTemplateFormatting(style StyleEnum, useColor bool, format string, a ...V) string { +// ApplyTemplateFormatting applies formatting to the provided template +func ApplyTemplateFormatting(style StyleEnum, useColor bool, format string, a ...V) string { if a == nil { a = []V{{}} } diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index d5937e2383..1437b26823 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -32,7 +32,6 @@ const ( Running Provisioning Restarting - Reconfiguring Stopping Stopped Warning @@ -42,6 +41,7 @@ const ( Launch Sad ThumbsUp + ThumbsDown Option Command LogEntry @@ -61,6 +61,7 @@ const ( DeletingHost Copying Connectivity + Confused Internet Mounting Celebrate @@ -89,4 +90,5 @@ const ( DryRun AddonEnable AddonDisable + Shrug ) diff --git a/pkg/minikube/out/style_test.go b/pkg/minikube/out/style_test.go index 06e1c42073..65f63c59b2 100644 --- a/pkg/minikube/out/style_test.go +++ b/pkg/minikube/out/style_test.go @@ -177,7 +177,7 @@ func TestApplyTemplateFormating(t *testing.T) { } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - rawGot := applyTemplateFormatting(test.styleEnum, test.useColor, test.format, test.a...) + rawGot := ApplyTemplateFormatting(test.styleEnum, test.useColor, test.format, test.a...) 
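applyTemplateFormatting is exported as ApplyTemplateFormatting so tests (as above) and other packages can render a styled, templated string without writing it to stdout. A brief usage sketch; the exact rendered prefix depends on the style table and color setting:

```go
// Fills in the template variables and applies the style prefix, returning
// the string instead of printing it.
s := out.ApplyTemplateFormatting(out.Tip, false, `To fix this, run: "{{.command}}"`, out.V{"command": "minikube delete"})
fmt.Print(s)
```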
got := strings.TrimSpace(rawGot) if got != test.expected { t.Errorf("Expected '%v' but got '%v'", test.expected, got) diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index ec93a3569a..da6f8fee95 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -55,7 +55,42 @@ var vmProblems = map[string]match{ Issues: []int{6098}, ShowIssueLink: true, }, - + "FILE_IN_USE": { + Regexp: re(`The process cannot access the file because it is being used by another process`), + Advice: "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager", + URL: "https://docs.docker.com/machine/drivers/hyper-v/", + GOOS: []string{"windows"}, + Issues: []int{7300}, + }, + "CREATE_TIMEOUT": { + Regexp: re(`create host timed out in \d`), + Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", + Issues: []int{7072}, + }, + "IMAGE_ARCH": { + Regexp: re(`Error: incompatible image architecture`), + Advice: "This driver does not yet work on your architecture. Maybe try --driver=none", + GOOS: []string{"linux"}, + Issues: []int{7071}, + }, + // Docker + "DOCKER_WSL2_MOUNT": { + Regexp: re(`cannot find cgroup mount destination: unknown`), + Advice: "Run: 'sudo mkdir /sys/fs/cgroup/systemd && sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'", + URL: "https://github.com/microsoft/WSL/issues/4189", + Issues: []int{5392}, + GOOS: []string{"linux"}, + }, + "DOCKER_READONLY": { + Regexp: re(`mkdir /var/lib/docker/volumes.*: read-only file system`), + Advice: "Restart Docker", + Issues: []int{6825}, + }, + "DOCKER_CHROMEOS": { + Regexp: re(`Container.*is not running.*chown docker:docker`), + Advice: "minikube is not yet compatible with ChromeOS", + Issues: []int{6411}, + }, // Hyperkit "HYPERKIT_NO_IP": { Regexp: re(`IP address never found in dhcp leases file Temporary Error: Could not find an IP address for`), @@ -82,7 +117,6 @@ var vmProblems = map[string]match{ Issues: []int{6079, 5780}, GOOS: []string{"darwin"}, }, - // Hyper-V "HYPERV_NO_VSWITCH": { Regexp: re(`no External vswitch found. 
A valid vswitch must be available for this command to run.`), @@ -98,24 +132,29 @@ var vmProblems = map[string]match{ }, "HYPERV_POWERSHELL_NOT_FOUND": { Regexp: re(`Powershell was not found in the path`), - Advice: "To start minikube with HyperV Powershell must be in your PATH`", + Advice: "To start minikube with Hyper-V, PowerShell must be in your PATH", URL: "https://docs.docker.com/machine/drivers/hyper-v/", GOOS: []string{"windows"}, }, "HYPERV_AS_ADMIN": { Regexp: re(`Hyper-v commands have to be run as an Administrator`), - Advice: "Run the minikube command as an Administrator", + Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", URL: "https://rominirani.com/docker-machine-windows-10-hyper-v-troubleshooting-tips-367c1ea73c24", Issues: []int{4511}, GOOS: []string{"windows"}, }, + "HYPERV_NEEDS_ESC": { + Regexp: re(`The requested operation requires elevation.`), + Advice: "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.", + Issues: []int{7347}, + GOOS: []string{"windows"}, + }, "HYPERV_FILE_DELETE_FAILURE": { Regexp: re(`Unable to remove machine directory`), Advice: "You may need to stop the Hyper-V Manager and run `minikube delete` again.", Issues: []int{6804}, GOOS: []string{"windows"}, }, - // KVM "KVM2_NOT_FOUND": { Regexp: re(`Driver "kvm2" not found. Do you have the plugin binary .* accessible in your PATH`), @@ -136,12 +175,12 @@ var vmProblems = map[string]match{ GOOS: []string{"linux"}, }, "KVM2_RESTART_NO_IP": { - Regexp: re(`Error starting stopped host: Machine didn't return an IP after 120 seconds`), + Regexp: re(`Error starting stopped host: Machine didn't return an IP after \d+ seconds`), Advice: "The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.", Issues: []int{3901, 3434}, }, "KVM2_START_NO_IP": { - Regexp: re(`Error in driver during machine creation: Machine didn't return an IP after 120 seconds`), + Regexp: re(`Error in driver during machine creation: Machine didn't return an IP after \d+ seconds`), Advice: "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none", URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/", Issues: []int{4249, 3566}, @@ -179,6 +218,12 @@ var vmProblems = map[string]match{ GOOS: []string{"linux"}, Issues: []int{5950}, }, + "KVM_OOM": { + Regexp: re(`cannot set up guest memory.*Cannot allocate memory`), + Advice: "Choose a smaller value for --memory, such as 2000", + GOOS: []string{"linux"}, + Issues: []int{6366}, + }, // None "NONE_APISERVER_MISSING": { Regexp: re(`apiserver process never appeared`), @@ -291,19 +336,24 @@ var vmProblems = map[string]match{ }, "VBOX_VTX_DISABLED": { Regexp: re(`This computer doesn't have VT-X/AMD-v enabled`), - Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.", + Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your system's BIOS manual for how to enable virtualization.", Issues: []int{3900, 4730}, }, "VERR_VERR_VMX_DISABLED": { Regexp: re(`VT-x is disabled.*VERR_VMX_MSR_ALL_VMX_DISABLED`), - Advice: "Virtualization support is disabled on your computer. 
If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.", + Advice: "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your system's BIOS manual for how to enable virtualization.", Issues: []int{5282, 5456}, }, "VBOX_VERR_VMX_NO_VMX": { Regexp: re(`VT-x is not available.*VERR_VMX_NO_VMX`), - Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS", + Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS", Issues: []int{1994, 5326}, }, + "VERR_SVM_DISABLED": { + Regexp: re(`VERR_SVM_DISABLED`), + Advice: "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS", + Issues: []int{7074}, + }, "VBOX_HOST_NETWORK": { Regexp: re(`Error setting up host only network on machine start.*Unspecified error`), Advice: "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. Try running 'minikube delete'", @@ -353,10 +403,10 @@ var netProblems = map[string]match{ Issues: []int{3922, 6109, 6123}, }, "PULL_TIMEOUT_EXCEEDED": { - Regexp: re(`failed to pull image.*Client.Timeout exceeded while awaiting headers`), - Advice: "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.", + Regexp: re(`ImagePull.*Timeout exceeded while awaiting headers`), + Advice: "A firewall is blocking Docker within the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.", URL: proxyDoc, - Issues: []int{3898}, + Issues: []int{3898, 6070}, }, "SSH_AUTH_FAILURE": { Regexp: re(`ssh: handshake failed: ssh: unable to authenticate.*, no supported methods remain`), @@ -386,6 +436,12 @@ var netProblems = map[string]match{ Issues: []int{6107}, URL: proxyDoc, }, + "NOT_A_TLS_HANDSHAKE": { + Regexp: re(`tls: first record does not look like a TLS handshake`), + Advice: "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy", + Issues: []int{7286}, + URL: proxyDoc, + }, } // deployProblems are Kubernetes deployment problems. 
@@ -405,7 +461,7 @@ var deployProblems = map[string]match{ }, "APISERVER_MISSING": { Regexp: re(`apiserver process never appeared`), - Advice: "Check that the provided apiserver flags are valid", + Advice: "Check that the provided apiserver flags are valid, and that SELinux is disabled", Issues: []int{4536, 6014}, }, "APISERVER_TIMEOUT": { @@ -444,6 +500,21 @@ var deployProblems = map[string]match{ Advice: "Confirm that you have a working internet connection and that your VM has not run out of resources by using: 'minikube logs'", Issues: []int{4749}, }, + "CERT_NOT_SIGNED_BY_CA": { + Regexp: re(`not signed by CA certificate ca: crypto/rsa: verification error`), + Advice: "Try 'minikube delete' to force new SSL certificates to be installed", + Issues: []int{6596}, + }, + "DOCKER_RESTART_FAILED": { + Regexp: re(`systemctl -f restart docker`), + Advice: "Remove the incompatible --docker-opt flag if one was provided", + Issues: []int{7070}, + }, + "WAITING_FOR_SSH": { + Regexp: re(`waiting for SSH to be available`), + Advice: "Try 'minikube delete', and disable any conflicting VPN or firewall software", + Issues: []int{4617}, + }, } // osProblems are operating-system specific issues @@ -470,6 +541,12 @@ var osProblems = map[string]match{ GOOS: []string{"darwin", "linux"}, Issues: []int{5714}, }, + "JUJU_LOCK_DENIED": { + Regexp: re(`unable to open /tmp/juju.*: permission denied`), + Advice: "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'", + GOOS: []string{"linux"}, + Issues: []int{6391}, + }, } // stateProblems are issues relating to local state @@ -489,4 +566,9 @@ var stateProblems = map[string]match{ Advice: "The minikube VM is offline. Please run 'minikube start' to start it again.", Issues: []int{3849, 3648}, }, + "DASHBOARD_ROLE_REF": { + Regexp: re(`dashboard.*cannot change roleRef`), + Advice: "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'", + Issues: []int{7256}, + }, } diff --git a/pkg/minikube/problem/problem.go b/pkg/minikube/problem/problem.go index 1de611b0a7..d5465a1830 100644 --- a/pkg/minikube/problem/problem.go +++ b/pkg/minikube/problem/problem.go @@ -57,7 +57,6 @@ type match struct { // Display problem metadata to the console func (p *Problem) Display() { - out.ErrT(out.FailureType, "Error: [{{.id}}] {{.error}}", out.V{"id": p.ID, "error": p.Err}) out.ErrT(out.Tip, "Suggestion: {{.advice}}", out.V{"advice": translate.T(p.Advice)}) if p.URL != "" { out.ErrT(out.Documentation, "Documentation: {{.url}}", out.V{"url": p.URL}) @@ -65,6 +64,12 @@ func (p *Problem) Display() { if len(p.Issues) == 0 { return } + + if len(p.Issues) == 1 { + out.ErrT(out.Issues, "Related issue: {{.url}}", out.V{"url": fmt.Sprintf("%s/%d", issueBase, p.Issues[0])}) + return + } + out.ErrT(out.Issues, "Related issues:") issues := p.Issues if len(issues) > 3 { diff --git a/pkg/minikube/problem/problem_test.go b/pkg/minikube/problem/problem_test.go index 8f37424de3..d954271801 100644 --- a/pkg/minikube/problem/problem_test.go +++ b/pkg/minikube/problem/problem_test.go @@ -44,7 +44,6 @@ func TestDisplay(t *testing.T) { problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test")}, description: "url, id and err", expected: ` -* Error: [example] test * Suggestion: * Documentation: example.com `, @@ -53,7 +52,6 @@ func TestDisplay(t *testing.T) { problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}, Advice: "you need a hug"}, description: "with 2 issues and 
suggestion", expected: ` -* Error: [example] test * Suggestion: you need a hug * Documentation: example.com * Related issues: @@ -65,7 +63,6 @@ func TestDisplay(t *testing.T) { problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1}}, description: "with 2 issues", expected: ` -* Error: [example] test * Suggestion: * Documentation: example.com * Related issues: @@ -78,7 +75,6 @@ func TestDisplay(t *testing.T) { problem: Problem{ID: "example", URL: "example.com", Err: fmt.Errorf("test"), Issues: []int{0, 1, 2, 3, 4, 5}}, description: "with 6 issues", expected: ` -* Error: [example] test * Suggestion: * Documentation: example.com * Related issues: diff --git a/pkg/minikube/registry/drvs/docker/docker.go b/pkg/minikube/registry/drvs/docker/docker.go index 8549d35706..db82619eb8 100644 --- a/pkg/minikube/registry/drvs/docker/docker.go +++ b/pkg/minikube/registry/drvs/docker/docker.go @@ -20,9 +20,12 @@ import ( "context" "fmt" "os/exec" + "runtime" + "strings" "time" "github.com/docker/machine/libmachine/drivers" + "github.com/golang/glog" "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/config" @@ -32,12 +35,21 @@ import ( ) func init() { + priority := registry.Default + // Staged rollout for preferred: + // - Linux + // - Windows (once "service" command works) + // - macOS + if runtime.GOOS == "linux" { + priority = registry.Preferred + } + if err := registry.Register(registry.DriverDef{ Name: driver.Docker, Config: configure, Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIBinary: oci.Docker}) }, Status: status, - Priority: registry.Fallback, + Priority: priority, }); err != nil { panic(fmt.Sprintf("register failed: %v", err)) } @@ -58,19 +70,40 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) { } func status() registry.State { + docURL := "https://minikube.sigs.k8s.io/docs/drivers/docker/" _, err := exec.LookPath(oci.Docker) if err != nil { - return registry.State{Error: err, Installed: false, Healthy: false, Fix: "Docker is required.", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/docker/"} + return registry.State{Error: err, Installed: false, Healthy: false, Fix: "Install Docker", Doc: docURL} } - // Allow no more than 3 seconds for docker info - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second) defer cancel() - err = exec.CommandContext(ctx, oci.Docker, "info").Run() - if err != nil { - return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Docker is not running or is responding too slow. 
Try: restarting docker desktop."} + // Quickly returns an error code if server is not running + cmd := exec.CommandContext(ctx, oci.Docker, "version", "--format", "{{.Server.Version}}") + _, err = cmd.Output() + if err == nil { + return registry.State{Installed: true, Healthy: true} } - return registry.State{Installed: true, Healthy: true} + glog.Warningf("docker returned error: %v", err) + + // Basic timeout + if ctx.Err() == context.DeadlineExceeded { + return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Restart the Docker service", Doc: docURL} + } + + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := strings.TrimSpace(string(exitErr.Stderr)) + newErr := fmt.Errorf(`%q %v: %s`, strings.Join(cmd.Args, " "), exitErr, stderr) + + if strings.Contains(stderr, "Cannot connect") || strings.Contains(stderr, "refused") || strings.Contains(stderr, "Is the docker daemon running") { + return registry.State{Error: newErr, Installed: true, Healthy: false, Fix: "Start the Docker service", Doc: docURL} + } + + // We don't have good advice, but at least we can provide a good error message + return registry.State{Error: newErr, Installed: true, Healthy: false, Doc: docURL} + } + + return registry.State{Error: err, Installed: true, Healthy: false, Doc: docURL} } diff --git a/pkg/minikube/registry/drvs/none/none.go b/pkg/minikube/registry/drvs/none/none.go index 26542d4d28..ec8cea71d8 100644 --- a/pkg/minikube/registry/drvs/none/none.go +++ b/pkg/minikube/registry/drvs/none/none.go @@ -21,6 +21,7 @@ package none import ( "fmt" "os/exec" + "os/user" "github.com/docker/machine/libmachine/drivers" "k8s.io/minikube/pkg/drivers/none" @@ -51,9 +52,23 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) { } func status() registry.State { - _, err := exec.LookPath("systemctl") + _, err := exec.LookPath("iptables") if err != nil { - return registry.State{Error: err, Fix: "Use a systemd based Linux distribution", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + return registry.State{Error: err, Fix: "iptables must be installed", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} } + + if _, err := exec.LookPath("docker"); err != nil { + return registry.State{Error: err, Installed: false, Fix: "Install docker", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + } + + u, err := user.Current() + if err != nil { + return registry.State{Error: err, Healthy: false, Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/none/"} + } + + if u.Uid != "0" { + return registry.State{Error: fmt.Errorf("the 'none' driver must be run as the root user"), Healthy: false, Fix: "For non-root usage, try the newer 'docker' driver", Installed: true} + } + return registry.State{Installed: true, Healthy: true} } diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index 301f61cb9f..3f608ef471 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -24,6 +24,40 @@ import ( "github.com/golang/glog" ) +const ( + // Podman is Kubernetes in container using podman driver + Podman = "podman" + // Docker is Kubernetes in container using docker driver + Docker = "docker" + // Mock driver + Mock = "mock" + // None driver + None = "none" +) + +// IsKIC checks if the driver is a kubernetes in container +func IsKIC(name string) bool { + return name == Docker || name == Podman +} + +// IsMock checks if the driver is a mock +func IsMock(name string) bool { + return name == Mock +} + +// IsVM 
checks if the driver is a VM +func IsVM(name string) bool { + if IsKIC(name) || IsMock(name) || BareMetal(name) { + return false + } + return true +} + +// BareMetal returns if this driver is unisolated +func BareMetal(name string) bool { + return name == None || name == Mock +} + var ( // globalRegistry is a globally accessible driver registry globalRegistry = newRegistry() @@ -34,6 +68,8 @@ type DriverState struct { Name string Priority Priority State State + // Rejection is why we chose not to use this driver + Rejection string } func (d DriverState) String() string { @@ -59,7 +95,7 @@ func Driver(name string) DriverDef { } // Available returns a list of available drivers in the global registry -func Available() []DriverState { +func Available(vm bool) []DriverState { sts := []DriverState{} glog.Infof("Querying for installed drivers using PATH=%s", os.Getenv("PATH")) @@ -76,7 +112,13 @@ func Available() []DriverState { priority = Unhealthy } - sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + if vm { + if IsVM(d.Name) { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } + } else { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } } // Descending priority for predictability diff --git a/pkg/minikube/registry/global_test.go b/pkg/minikube/registry/global_test.go index dbc76b6d51..9cb01a1e35 100644 --- a/pkg/minikube/registry/global_test.go +++ b/pkg/minikube/registry/global_test.go @@ -102,7 +102,7 @@ func TestGlobalAvailable(t *testing.T) { }, } - if diff := cmp.Diff(Available(), expected); diff != "" { + if diff := cmp.Diff(Available(false), expected); diff != "" { t.Errorf("available mismatch (-want +got):\n%s", diff) } } diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index eddb45dcf2..d84cc27f9d 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -22,6 +22,7 @@ import ( "io" "net/url" "os" + "strconv" "strings" "text/template" "time" @@ -196,6 +197,13 @@ func printURLsForService(c typed_core.CoreV1Interface, ip, service, namespace st urls := []string{} portNames := []string{} for _, port := range svc.Spec.Ports { + + if port.Name != "" { + m[port.TargetPort.IntVal] = fmt.Sprintf("%s/%d", port.Name, port.Port) + } else { + m[port.TargetPort.IntVal] = strconv.Itoa(int(port.Port)) + } + if port.NodePort > 0 { var doc bytes.Buffer err = t.Execute(&doc, struct { @@ -264,19 +272,34 @@ func PrintServiceList(writer io.Writer, data [][]string) { table.Render() } +// SVCNotFoundError error type handles 'service not found' scenarios +type SVCNotFoundError struct { + Err error +} + +// Error method for SVCNotFoundError type +func (t SVCNotFoundError) Error() string { + return "Service not found" +} + // WaitForService waits for a service, and return the urls when available func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool, wait int, interval int) ([]string, error) { - var urlList []string // Convert "Amount of time to wait" and "interval of each check" to attempts if interval == 0 { interval = 1 } + + err := CheckService(namespace, service) + if err != nil { + return nil, &SVCNotFoundError{err} + } + chkSVC := func() error { return CheckService(namespace, service) } if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil { - return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. 
You may select another namespace by using 'minikube service %s -n ", service, namespace, service) + return nil, &SVCNotFoundError{err} } serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate) diff --git a/pkg/minikube/service/service_test.go b/pkg/minikube/service/service_test.go index 0368c1e240..449c55343b 100644 --- a/pkg/minikube/service/service_test.go +++ b/pkg/minikube/service/service_test.go @@ -134,13 +134,17 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ Spec: core.ServiceSpec{ Ports: []core.ServicePort{ { + Name: "port1", NodePort: int32(1111), + Port: int32(11111), TargetPort: intstr.IntOrString{ IntVal: int32(11111), }, }, { + Name: "port2", NodePort: int32(2222), + Port: int32(22222), TargetPort: intstr.IntOrString{ IntVal: int32(22222), }, @@ -324,7 +328,7 @@ func TestPrintURLsForService(t *testing.T) { serviceName: "mock-dashboard", namespace: "default", tmpl: template.Must(template.New("svc-arbitrary-template").Parse("{{.Name}}={{.IP}}:{{.Port}}")), - expectedOutput: []string{"port1=127.0.0.1:1111", "port2=127.0.0.1:2222"}, + expectedOutput: []string{"port1/11111=127.0.0.1:1111", "port2/22222=127.0.0.1:2222"}, }, { description: "empty slice for no node ports", @@ -452,7 +456,7 @@ func TestGetServiceURLs(t *testing.T) { Namespace: "default", Name: "mock-dashboard", URLs: []string{"http://127.0.0.1:1111", "http://127.0.0.1:2222"}, - PortNames: []string{"port1", "port2"}, + PortNames: []string{"port1/11111", "port2/22222"}, }, { Namespace: "default", diff --git a/pkg/minikube/sysinit/openrc.go b/pkg/minikube/sysinit/openrc.go new file mode 100644 index 0000000000..822c4157d0 --- /dev/null +++ b/pkg/minikube/sysinit/openrc.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package sysinit provides an abstraction over init systems like systemctl +package sysinit + +import ( + "bytes" + "context" + "html/template" + "os/exec" + "path" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/vmpath" +) + +var restartWrapper = `#!/bin/bash +# Wrapper script to emulate systemd restart on non-systemd systems +readonly UNIT_PATH=$1 + +while true; do + if [[ -f "${UNIT_PATH}" ]]; then + eval $(egrep "^ExecStart=" "${UNIT_PATH}" | cut -d"=" -f2-) + fi + sleep 1 +done +` + +var initScriptTmpl = template.Must(template.New("initScript").Parse(`#!/bin/bash +# OpenRC init script shim for systemd units +readonly NAME="{{.Name}}" +readonly RESTART_WRAPPER="{{.Wrapper}}" +readonly UNIT_PATH="{{.Unit}}" +readonly PID_PATH="/var/run/${NAME}.pid" + +function start() { + start-stop-daemon --oknodo --pidfile "${PID_PATH}" --background --start --make-pid --exec "${RESTART_WRAPPER}" "${UNIT_PATH}" +} + +function stop() { + if [[ -f "${PID_PATH}" ]]; then + pkill -P "$(cat ${PID_PATH})" + fi + start-stop-daemon --oknodo --pidfile "${PID_PATH}" --stop +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + stop + start + ;; + status) + start-stop-daemon --pidfile "${PID_PATH}" --status + ;; + *) + echo "Usage: {{.Name}} {start|stop|restart|status}" + exit 1 + ;; +esac +`)) + +// OpenRC is a service manager for OpenRC-like init systems +type OpenRC struct { + r Runner +} + +// Name returns the name of the init system +func (s *OpenRC) Name() string { + return "OpenRC" +} + +// Active checks if a service is running +func (s *OpenRC) Active(svc string) bool { + _, err := s.r.RunCmd(exec.Command("sudo", "service", svc, "status")) + return err == nil +} + +// Start starts a service idempotently +func (s *OpenRC) Start(svc string) error { + if s.Active(svc) { + return nil + } + ctx, cb := context.WithTimeout(context.Background(), 5*time.Second) + defer cb() + + rr, err := s.r.RunCmd(exec.CommandContext(ctx, "sudo", "service", svc, "start")) + glog.Infof("start output: %s", rr.Output()) + return err +} + +// Disable does nothing +func (s *OpenRC) Disable(svc string) error { + return nil +} + +// Enable does nothing +func (s *OpenRC) Enable(svc string) error { + return nil +} + +// Restart restarts a service +func (s *OpenRC) Restart(svc string) error { + rr, err := s.r.RunCmd(exec.Command("sudo", "service", svc, "restart")) + glog.Infof("restart output: %s", rr.Output()) + return err +} + +// Stop stops a service +func (s *OpenRC) Stop(svc string) error { + rr, err := s.r.RunCmd(exec.Command("sudo", "service", svc, "stop")) + glog.Infof("stop output: %s", rr.Output()) + return err +} + +// ForceStop stops a service with prejuidice +func (s *OpenRC) ForceStop(svc string) error { + return s.Stop(svc) +} + +// GenerateInitShim generates any additional init files required for this service +func (s *OpenRC) GenerateInitShim(svc string, binary string, unit string) ([]assets.CopyableFile, error) { + restartWrapperPath := path.Join(vmpath.GuestPersistentDir, "openrc-restart-wrapper.sh") + + opts := struct { + Binary string + Wrapper string + Name string + Unit string + }{ + Name: svc, + Binary: binary, + Wrapper: restartWrapperPath, + Unit: unit, + } + + var b bytes.Buffer + if err := initScriptTmpl.Execute(&b, opts); err != nil { + return nil, errors.Wrap(err, "template execute") + } + + files := []assets.CopyableFile{ + assets.NewMemoryAssetTarget([]byte(restartWrapper), restartWrapperPath, 
"0755"), + assets.NewMemoryAssetTarget(b.Bytes(), path.Join("/etc/init.d/", svc), "0755"), + } + + return files, nil +} diff --git a/pkg/minikube/sysinit/sysinit.go b/pkg/minikube/sysinit/sysinit.go new file mode 100644 index 0000000000..fc20479ded --- /dev/null +++ b/pkg/minikube/sysinit/sysinit.go @@ -0,0 +1,85 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sysinit + +import ( + "os/exec" + + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/command" +) + +var cachedSystemdCheck *bool + +// Runner is the subset of command.Runner this package consumes +type Runner interface { + RunCmd(cmd *exec.Cmd) (*command.RunResult, error) +} + +// Manager is a common interface for init systems +type Manager interface { + // Name returns the name of the init manager + Name() string + + // Active returns if a service is active + Active(string) bool + + // Disable disables a service + Disable(string) error + + // Enable enables a service + Enable(string) error + + // Start starts a service idempotently + Start(string) error + + // Restart restarts a service + Restart(string) error + + // Stop stops a service + Stop(string) error + + // ForceStop stops a service with prejudice + ForceStop(string) error + + // GenerateInitShim generates any additional init files required for this service + GenerateInitShim(svc string, binary string, unit string) ([]assets.CopyableFile, error) +} + +// New returns an appropriately configured service manager +func New(r Runner) Manager { + // If we are not provided a runner, we can't do anything anyways + if r == nil { + return nil + } + + var systemd bool + + // Caching the result is important, as this manager may be created in many places, + // and ssh calls are expensive on some drivers, such as Docker. + if cachedSystemdCheck != nil { + systemd = *cachedSystemdCheck + } else { + systemd = usesSystemd(r) + cachedSystemdCheck = &systemd + } + + if systemd { + return &Systemd{r: r} + } + return &OpenRC{r: r} +} diff --git a/pkg/minikube/sysinit/systemd.go b/pkg/minikube/sysinit/systemd.go new file mode 100644 index 0000000000..4bb53a186b --- /dev/null +++ b/pkg/minikube/sysinit/systemd.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package sysinit provides an abstraction over init systems like systemctl +package sysinit + +import ( + "os/exec" + + "k8s.io/minikube/pkg/minikube/assets" +) + +// Systemd is a service manager for systemd distributions +type Systemd struct { + r Runner +} + +// Name returns the name of the init system +func (s *Systemd) Name() string { + return "systemd" +} + +// reload reloads systemd configuration +func (s *Systemd) reload() error { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "daemon-reload")) + return err +} + +// Active checks if a service is running +func (s *Systemd) Active(svc string) bool { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", svc)) + return err == nil +} + +// Disable disables a service +func (s *Systemd) Disable(svc string) error { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "disable", svc)) + return err +} + +// Enable enables a service +func (s *Systemd) Enable(svc string) error { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "enable", svc)) + return err +} + +// Start starts a service +func (s *Systemd) Start(svc string) error { + if err := s.reload(); err != nil { + return err + } + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "start", svc)) + return err +} + +// Restart restarts a service +func (s *Systemd) Restart(svc string) error { + if err := s.reload(); err != nil { + return err + } + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "restart", svc)) + return err +} + +// Stop stops a service +func (s *Systemd) Stop(svc string) error { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "stop", svc)) + return err +} + +// ForceStop terminates a service with prejudice +func (s *Systemd) ForceStop(svc string) error { + _, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "stop", "-f", svc)) + return err +} + +// GenerateInitShim does nothing for systemd +func (s *Systemd) GenerateInitShim(svc string, binary string, unit string) ([]assets.CopyableFile, error) { + return nil, nil +} + +func usesSystemd(r Runner) bool { + _, err := r.RunCmd(exec.Command("systemctl", "--version")) + return err == nil +} diff --git a/pkg/minikube/tests/driver_mock.go b/pkg/minikube/tests/driver_mock.go index 14d5b2f59d..2b9dff6ad0 100644 --- a/pkg/minikube/tests/driver_mock.go +++ b/pkg/minikube/tests/driver_mock.go @@ -17,6 +17,7 @@ limitations under the License. 
package tests import ( + "runtime" "testing" "github.com/docker/machine/libmachine/drivers" @@ -24,6 +25,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/constants" ) // MockDriver is a struct used to mock out libmachine.Driver @@ -96,11 +98,14 @@ func (d *MockDriver) GetSSHKeyPath() string { // GetState returns the state of the driver func (d *MockDriver) GetState() (state.State, error) { - d.Logf("MockDriver.GetState: %v", d.CurrentState) - if d.NotExistError { + _, file, no, _ := runtime.Caller(2) + d.Logf("MockDriver.GetState called from %s#%d: returning %q", file, no, d.CurrentState) + + // NOTE: this logic is questionable + if d.NotExistError && d.CurrentState != state.Stopped && d.CurrentState != state.None { d.CurrentState = state.Error - // don't use cluster.ErrorMachineNotExist to avoid import cycle - return d.CurrentState, errors.New("machine does not exist") + d.Logf("mock NotExistError set, setting state=%s err=%v", d.CurrentState, constants.ErrMachineMissing) + return d.CurrentState, constants.ErrMachineMissing } return d.CurrentState, nil } @@ -123,12 +128,13 @@ func (d *MockDriver) Remove() error { if d.RemoveError { return errors.New("error deleting machine") } + d.NotExistError = false return nil } // Restart restarts the machine func (d *MockDriver) Restart() error { - d.Logf("MockDriver.Restart") + d.Logf("MockDriver.Restart, setting CurrentState=%s", state.Running) d.CurrentState = state.Running return nil } diff --git a/pkg/minikube/translate/translate.go b/pkg/minikube/translate/translate.go index 960352da82..4a6092515a 100644 --- a/pkg/minikube/translate/translate.go +++ b/pkg/minikube/translate/translate.go @@ -18,6 +18,8 @@ package translate import ( "encoding/json" + "fmt" + "path" "strings" "github.com/cloudfoundry-attic/jibber_jabber" @@ -73,11 +75,23 @@ func DetermineLocale() { } // Load translations for preferred language into memory. - translationFile := "translations/" + preferredLanguage.String() + ".json" + p := preferredLanguage.String() + translationFile := path.Join("translations", fmt.Sprintf("%s.json", p)) t, err := Asset(translationFile) if err != nil { - glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err) - return + // Attempt to find a more broad locale, e.g. fr instead of fr-FR. + if strings.Contains(p, "-") { + p = strings.Split(p, "-")[0] + translationFile := path.Join("translations", fmt.Sprintf("%s.json", p)) + t, err = Asset(translationFile) + if err != nil { + glog.Infof("Failed to load translation file for %s: %v", p, err) + return + } + } else { + glog.Infof("Failed to load translation file for %s: %v", preferredLanguage.String(), err) + return + } } err = json.Unmarshal(t, &Translations) diff --git a/pkg/minikube/tunnel/route_freebsd.go b/pkg/minikube/tunnel/route_freebsd.go new file mode 100644 index 0000000000..19cd4e4ad3 --- /dev/null +++ b/pkg/minikube/tunnel/route_freebsd.go @@ -0,0 +1,166 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tunnel + +import ( + "fmt" + "net" + "os/exec" + "regexp" + "strings" + + "github.com/golang/glog" +) + +func (router *osRouter) EnsureRouteIsAdded(route *Route) error { + exists, err := isValidToAddOrDelete(router, route) + if err != nil { + return err + } + if exists { + return nil + } + + serviceCIDR := route.DestCIDR.String() + gatewayIP := route.Gateway.String() + + glog.Infof("Adding route for CIDR %s to gateway %s", serviceCIDR, gatewayIP) + command := exec.Command("sudo", "route", "-n", "add", serviceCIDR, gatewayIP) + glog.Infof("About to run command: %s", command.Args) + stdInAndOut, err := command.CombinedOutput() + message := fmt.Sprintf("%s", stdInAndOut) + re := regexp.MustCompile(fmt.Sprintf("add net (.*): gateway %s\n", gatewayIP)) + if !re.MatchString(message) { + return fmt.Errorf("error adding Route: %s, %d", message, len(strings.Split(message, "\n"))) + } + glog.Infof("%s", stdInAndOut) + + return err +} + +func (router *osRouter) Inspect(route *Route) (exists bool, conflict string, overlaps []string, err error) { + cmd := exec.Command("netstat", "-nr", "-f", "inet") + cmd.Env = append(cmd.Env, "LC_ALL=C") + stdInAndOut, err := cmd.CombinedOutput() + if err != nil { + err = fmt.Errorf("error running '%v': %s", cmd, err) + return + } + + rt := router.parseTable(stdInAndOut) + + exists, conflict, overlaps = rt.Check(route) + + return +} + +func (router *osRouter) parseTable(table []byte) routingTable { + t := routingTable{} + skip := true + for _, line := range strings.Split(string(table), "\n") { + // header + if strings.HasPrefix(line, "Destination") { + skip = false + continue + } + // don't care about the 0.0.0.0 routes + if skip || strings.HasPrefix(line, "default") { + continue + } + fields := strings.Fields(line) + + if len(fields) <= 2 { + continue + } + dstCIDRString := router.padCIDR(fields[0]) + gatewayIPString := fields[1] + gatewayIP := net.ParseIP(gatewayIPString) + + _, ipNet, err := net.ParseCIDR(dstCIDRString) + if err != nil { + glog.V(4).Infof("skipping line: can't parse CIDR from routing table: %s", dstCIDRString) + } else if gatewayIP == nil { + glog.V(4).Infof("skipping line: can't parse IP from routing table: %s", gatewayIPString) + } else { + tableLine := routingTableLine{ + route: &Route{ + DestCIDR: ipNet, + Gateway: gatewayIP, + }, + line: line, + } + t = append(t, tableLine) + } + } + + return t +} + +func (router *osRouter) padCIDR(origCIDR string) string { + s := "" + dots := 0 + slash := false + for i, c := range origCIDR { + if c == '.' 
{ + dots++ + } + if c == '/' { + for dots < 3 { + s += ".0" + dots++ + } + slash = true + } + if i == len(origCIDR)-1 { + s += string(c) + bits := 32 - 8*(3-dots) + for dots < 3 { + s += ".0" + dots++ + } + if !slash { + s += fmt.Sprintf("/%d", bits) + } + } else { + s += string(c) + } + } + return s +} + +func (router *osRouter) Cleanup(route *Route) error { + glog.V(3).Infof("Cleaning up %s\n", route) + exists, err := isValidToAddOrDelete(router, route) + if err != nil { + return err + } + if !exists { + return nil + } + cmd := exec.Command("sudo", "route", "-n", "delete", route.DestCIDR.String()) + stdInAndOut, err := cmd.CombinedOutput() + if err != nil { + return err + } + msg := fmt.Sprintf("%s", stdInAndOut) + glog.V(4).Infof("%s", msg) + re := regexp.MustCompile("^delete net ([^:]*)$") + if !re.MatchString(msg) { + return fmt.Errorf("error deleting route: %s, %d", msg, len(strings.Split(msg, "\n"))) + } + return nil +} diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 193478f215..2f630f9332 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -19,30 +19,32 @@ package provision import ( "bytes" "fmt" - "path" "text/template" "time" "github.com/docker/machine/libmachine/auth" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) // BuildrootProvisioner provisions the custom system based on Buildroot type BuildrootProvisioner struct { provision.SystemdProvisioner + clusterName string } // NewBuildrootProvisioner creates a new BuildrootProvisioner func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner { return &BuildrootProvisioner{ - provision.NewSystemdProvisioner("buildroot", d), + NewSystemdProvisioner("buildroot", d), + viper.GetString(config.ProfileName), } } @@ -65,7 +67,7 @@ func (p *BuildrootProvisioner) GenerateDockerOptions(dockerPort int) (*provision noPivot := true // Using pivot_root is not supported on fstype rootfs if fstype, err := rootFileSystemType(p); err == nil { - log.Debugf("root file system type: %s", fstype) + glog.Infof("root file system type: %s", fstype) noPivot = fstype == "rootfs" } @@ -79,7 +81,7 @@ Requires= minikube-automount.service docker.socket Type=notify ` if noPivot { - log.Warn("Using fundamentally insecure --no-pivot option") + glog.Warning("Using fundamentally insecure --no-pivot option") engineConfigTmpl += ` # DOCKER_RAMDISK disables pivot_root in Docker, using MS_MOVE instead. 
Environment=DOCKER_RAMDISK=yes @@ -140,30 +142,11 @@ WantedBy=multi-user.target return nil, err } - dockerCfg := &provision.DockerOptions{ + do := &provision.DockerOptions{ EngineOptions: engineCfg.String(), EngineOptionsPath: "/lib/systemd/system/docker.service", } - - log.Info("Setting Docker configuration on the remote daemon...") - - if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(dockerCfg.EngineOptionsPath), dockerCfg.EngineOptions, dockerCfg.EngineOptionsPath)); err != nil { - return nil, err - } - - // To make sure if there is a already-installed docker on the ISO to pick up the new systemd file - if err := p.Service("", serviceaction.DaemonReload); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Enable); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Restart); err != nil { - return nil, err - } - return dockerCfg, nil + return do, updateUnit(p, "docker", do.EngineOptions, do.EngineOptionsPath) } // Package installs a package @@ -177,18 +160,18 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions p.AuthOptions = authOptions p.EngineOptions = engineOptions - log.Infof("provisioning hostname %q", p.Driver.GetMachineName()) + glog.Infof("provisioning hostname %q", p.Driver.GetMachineName()) if err := p.SetHostname(p.Driver.GetMachineName()); err != nil { return err } p.AuthOptions = setRemoteAuthOptions(p) - log.Debugf("set auth options %+v", p.AuthOptions) + glog.Infof("set auth options %+v", p.AuthOptions) - log.Debugf("setting up certificates") + glog.Infof("setting up certificates") configAuth := func() error { if err := configureAuth(p); err != nil { - log.Warnf("configureAuth failed: %v", err) + glog.Warningf("configureAuth failed: %v", err) return &retry.RetriableError{Err: err} } return nil @@ -196,13 +179,13 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions err := retry.Expo(configAuth, time.Second, 2*time.Minute) if err != nil { - log.Debugf("Error configuring auth during provisioning %v", err) + glog.Infof("Error configuring auth during provisioning %v", err) return err } - log.Debugf("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { - log.Debugf("Error setting container-runtime options during provisioning %v", err) + glog.Infof("setting minikube options for container-runtime") + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { + glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 52fb131960..7b2e9e6539 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -31,10 +31,10 @@ import ( "github.com/docker/machine/libmachine/cert" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/mcnutils" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" @@ -66,11 +66,24 @@ func init() { } +// NewSystemdProvisioner is our fork of the same name in the upstream provision library, without the packages +func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) 
provision.SystemdProvisioner { + return provision.SystemdProvisioner{ + GenericProvisioner: provision.GenericProvisioner{ + SSHCommander: provision.GenericSSHCommander{Driver: d}, + DockerOptionsDir: "/etc/docker", + DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", + OsReleaseID: osReleaseID, + Driver: d, + }, + } +} + func configureAuth(p miniProvisioner) error { - log.Infof("configureAuth start") + glog.Infof("configureAuth start") start := time.Now() defer func() { - log.Infof("configureAuth took %s", time.Since(start)) + glog.Infof("configureAuth took %s", time.Since(start)) }() driver := p.GetDriver() @@ -90,7 +103,7 @@ func configureAuth(p miniProvisioner) error { // The Host IP is always added to the certificate's SANs list hosts := append(authOptions.ServerCertSANs, ip, "localhost", "127.0.0.1") - log.Debugf("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", + glog.Infof("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", authOptions.ServerCertPath, authOptions.CaCertPath, authOptions.CaPrivateKeyPath, @@ -116,11 +129,11 @@ func configureAuth(p miniProvisioner) error { } func copyHostCerts(authOptions auth.Options) error { - log.Infof("copyHostCerts") + glog.Infof("copyHostCerts") err := os.MkdirAll(authOptions.StorePath, 0700) if err != nil { - log.Errorf("mkdir failed: %v", err) + glog.Errorf("mkdir failed: %v", err) } hostCerts := map[string]string{ @@ -144,7 +157,7 @@ func copyHostCerts(authOptions auth.Options) error { } func copyRemoteCerts(authOptions auth.Options, driver drivers.Driver) error { - log.Infof("copyRemoteCerts") + glog.Infof("copyRemoteCerts") remoteCerts := map[string]string{ authOptions.CaCertPath: authOptions.CaCertRemotePath, @@ -276,3 +289,16 @@ func concatStrings(src []string, prefix string, postfix string) []string { } return ret } + +// updateUnit efficiently updates a systemd unit file +func updateUnit(p provision.SSHCommander, name string, content string, dst string) error { + glog.Infof("Updating %s unit: %s ...", name, dst) + + if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s.new", path.Dir(dst), content, dst)); err != nil { + return err + } + if _, err := p.SSHCommand(fmt.Sprintf("sudo diff -u %s %s.new || { sudo mv %s.new %s; sudo systemctl -f daemon-reload && sudo sudo systemctl -f restart %s; }", dst, dst, dst, dst, name)); err != nil { + return err + } + return nil +} diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 7cebe18dbb..3fbf006b69 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -19,18 +19,18 @@ package provision import ( "bytes" "fmt" - "path" "text/template" "time" "github.com/docker/machine/libmachine/auth" "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" - "github.com/docker/machine/libmachine/log" "github.com/docker/machine/libmachine/provision" "github.com/docker/machine/libmachine/provision/pkgaction" - "github.com/docker/machine/libmachine/provision/serviceaction" "github.com/docker/machine/libmachine/swarm" + "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) @@ -43,7 +43,8 @@ type UbuntuProvisioner struct { func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner { return &UbuntuProvisioner{ BuildrootProvisioner{ - provision.NewSystemdProvisioner("ubuntu", d), + NewSystemdProvisioner("ubuntu", d), + viper.GetString(config.ProfileName), }, } } @@ -67,7 +68,7 
@@ func (p *UbuntuProvisioner) GenerateDockerOptions(dockerPort int) (*provision.Do noPivot := true // Using pivot_root is not supported on fstype rootfs if fstype, err := rootFileSystemType(p); err == nil { - log.Debugf("root file system type: %s", fstype) + glog.Infof("root file system type: %s", fstype) noPivot = fstype == "rootfs" } @@ -83,7 +84,7 @@ Requires=docker.socket Type=notify ` if noPivot { - log.Warn("Using fundamentally insecure --no-pivot option") + glog.Warning("Using fundamentally insecure --no-pivot option") engineConfigTmpl += ` # DOCKER_RAMDISK disables pivot_root in Docker, using MS_MOVE instead. Environment=DOCKER_RAMDISK=yes @@ -144,30 +145,11 @@ WantedBy=multi-user.target return nil, err } - dockerCfg := &provision.DockerOptions{ + do := &provision.DockerOptions{ EngineOptions: engineCfg.String(), EngineOptionsPath: "/lib/systemd/system/docker.service", } - - log.Info("Setting Docker configuration on the remote daemon...") - - if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(dockerCfg.EngineOptionsPath), dockerCfg.EngineOptions, dockerCfg.EngineOptionsPath)); err != nil { - return nil, err - } - - // because in kic base image we pre-install docker it already has a service file. we need to daemon-reload for the new systemd file - if err := p.Service("", serviceaction.DaemonReload); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Enable); err != nil { - return nil, err - } - - if err := p.Service("docker", serviceaction.Restart); err != nil { - return nil, err - } - return dockerCfg, nil + return do, updateUnit(p, "docker", do.EngineOptions, do.EngineOptionsPath) } // Package installs a package @@ -181,32 +163,33 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au p.AuthOptions = authOptions p.EngineOptions = engineOptions - log.Infof("provisioning hostname %q", p.Driver.GetMachineName()) + glog.Infof("provisioning hostname %q", p.Driver.GetMachineName()) if err := p.SetHostname(p.Driver.GetMachineName()); err != nil { return err } p.AuthOptions = setRemoteAuthOptions(p) - log.Debugf("set auth options %+v", p.AuthOptions) + glog.Infof("set auth options %+v", p.AuthOptions) - log.Debugf("setting up certificates") + glog.Infof("setting up certificates") configAuth := func() error { if err := configureAuth(p); err != nil { - log.Warnf("configureAuth failed: %v", err) + glog.Warningf("configureAuth failed: %v", err) return &retry.RetriableError{Err: err} } return nil } err := retry.Expo(configAuth, time.Second, 2*time.Minute) + if err != nil { - log.Debugf("Error configuring auth during provisioning %v", err) + glog.Infof("Error configuring auth during provisioning %v", err) return err } - log.Debugf("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { - log.Debugf("Error setting container-runtime options during provisioning %v", err) + glog.Infof("setting minikube options for container-runtime") + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { + glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/util/config.go b/pkg/util/config.go deleted file mode 100644 index 6d043e1e61..0000000000 --- a/pkg/util/config.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" - - utilnet "k8s.io/apimachinery/pkg/util/net" -) - -// findNestedElement uses reflection to find the element corresponding to the dot-separated string parameter. -func findNestedElement(s string, c interface{}) (reflect.Value, error) { - fields := strings.Split(s, ".") - - // Take the ValueOf to get a pointer, so we can actually mutate the element. - e := reflect.Indirect(reflect.ValueOf(c).Elem()) - - for _, field := range fields { - e = reflect.Indirect(e.FieldByName(field)) - - // FieldByName returns the zero value if the field does not exist. - if e == (reflect.Value{}) { - return e, fmt.Errorf("unable to find field by name: %s", field) - } - // Start the loop again, on the next level. - } - return e, nil -} - -// setElement sets the supplied element to the value in the supplied string. The string will be coerced to the correct type. -func setElement(e reflect.Value, v string) error { - switch e.Interface().(type) { - case int, int32, int64: - return convertInt(e, v) - case string: - return convertString(e, v) - case float32, float64: - return convertFloat(e, v) - case bool: - return convertBool(e, v) - case net.IP: - return convertIP(e, v) - case net.IPNet: - return convertCIDR(e, v) - case utilnet.PortRange: - return convertPortRange(e, v) - case time.Duration: - return convertDuration(e, v) - case []string: - vals := strings.Split(v, ",") - e.Set(reflect.ValueOf(vals)) - case map[string]string: - return convertMap(e, v) - default: - // Last ditch attempt to convert anything based on its underlying kind. 
- // This covers any types that are aliased to a native type - return convertKind(e, v) - } - - return nil -} - -func convertMap(e reflect.Value, v string) error { - if e.IsNil() { - e.Set(reflect.MakeMap(e.Type())) - } - vals := strings.Split(v, ",") - for _, subitem := range vals { - subvals := strings.FieldsFunc(subitem, func(c rune) bool { - return c == '<' || c == '=' || c == '>' - }) - if len(subvals) != 2 { - return fmt.Errorf("unparsable %s", v) - } - e.SetMapIndex(reflect.ValueOf(subvals[0]), reflect.ValueOf(subvals[1])) - } - return nil -} - -func convertKind(e reflect.Value, v string) error { - switch e.Kind() { - case reflect.Int, reflect.Int32, reflect.Int64: - return convertInt(e, v) - case reflect.String: - return convertString(e, v) - case reflect.Float32, reflect.Float64: - return convertFloat(e, v) - case reflect.Bool: - return convertBool(e, v) - default: - return fmt.Errorf("unable to set type %T", e.Kind()) - } -} - -func convertInt(e reflect.Value, v string) error { - i, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("error converting input %s to an integer: %v", v, err) - } - e.SetInt(int64(i)) - return nil -} - -func convertString(e reflect.Value, v string) error { - e.SetString(v) - return nil -} - -func convertFloat(e reflect.Value, v string) error { - f, err := strconv.ParseFloat(v, 64) - if err != nil { - return fmt.Errorf("error converting input %s to a float: %v", v, err) - } - e.SetFloat(f) - return nil -} - -func convertBool(e reflect.Value, v string) error { - b, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("error converting input %s to a bool: %v", v, err) - } - e.SetBool(b) - return nil -} - -func convertIP(e reflect.Value, v string) error { - ip := net.ParseIP(v) - if ip == nil { - return fmt.Errorf("error converting input %s to an IP", v) - } - e.Set(reflect.ValueOf(ip)) - return nil -} - -func convertCIDR(e reflect.Value, v string) error { - _, cidr, err := net.ParseCIDR(v) - if err != nil { - return fmt.Errorf("error converting input %s to a CIDR: %v", v, err) - } - e.Set(reflect.ValueOf(*cidr)) - return nil -} - -func convertPortRange(e reflect.Value, v string) error { - pr, err := utilnet.ParsePortRange(v) - if err != nil { - return fmt.Errorf("error converting input %s to PortRange: %v", v, err) - } - e.Set(reflect.ValueOf(*pr)) - return nil -} - -func convertDuration(e reflect.Value, v string) error { - dur, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("error converting input %s to Duration: %v", v, err) - } - e.Set(reflect.ValueOf(dur)) - return nil -} - -// FindAndSet sets the nested value. -func FindAndSet(path string, c interface{}, value string) error { - elem, err := findNestedElement(path, c) - if err != nil { - return err - } - return setElement(elem, value) -} diff --git a/pkg/util/config_test.go b/pkg/util/config_test.go deleted file mode 100644 index 2ca74c95ac..0000000000 --- a/pkg/util/config_test.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "math" - "net" - "reflect" - "testing" - "time" - - utilnet "k8s.io/apimachinery/pkg/util/net" -) - -type aliasedString string - -type testConfig struct { - A string - B int - C float32 - D subConfig1 - E *subConfig2 -} - -type subConfig1 struct { - F string - G int - H float32 - I subConfig3 -} - -type subConfig2 struct { - J string - K int - L float32 -} - -type subConfig3 struct { - M string - N int - O float32 - P bool - Q net.IP - R utilnet.PortRange - S []string - T aliasedString - U net.IPNet - V time.Duration -} - -func buildConfig() testConfig { - _, cidr, _ := net.ParseCIDR("12.34.56.78/16") - return testConfig{ - A: "foo", - B: 1, - C: 1.1, - D: subConfig1{ - F: "bar", - G: 2, - H: 2.2, - I: subConfig3{ - M: "baz", - N: 3, - O: 3.3, - P: false, - Q: net.ParseIP("12.34.56.78"), - R: utilnet.PortRange{Base: 2, Size: 4}, - U: *cidr, - V: 5 * time.Second, - }, - }, - E: &subConfig2{ - J: "bat", - K: 4, - L: 4.4, - }, - } -} - -func TestFindNestedStrings(t *testing.T) { - a := buildConfig() - for _, tc := range []struct { - input string - output string - }{ - {"A", "foo"}, - {"D.F", "bar"}, - {"D.I.M", "baz"}, - {"E.J", "bat"}, - } { - v, err := findNestedElement(tc.input, &a) - if err != nil { - t.Fatalf("Did not expect error. Got: %v", err) - } - if v.String() != tc.output { - t.Fatalf("Expected: %s, got %s", tc.output, v.String()) - } - } -} - -func TestFindNestedInts(t *testing.T) { - a := buildConfig() - - for _, tc := range []struct { - input string - output int64 - }{ - {"B", 1}, - {"D.G", 2}, - {"D.I.N", 3}, - {"E.K", 4}, - } { - v, err := findNestedElement(tc.input, &a) - if err != nil { - t.Fatalf("Did not expect error. Got: %v", err) - } - if v.Int() != tc.output { - t.Fatalf("Expected: %d, got %d", tc.output, v.Int()) - } - } -} - -func checkFloats(f1, f2 float64) bool { - return math.Abs(f1-f2) < .00001 -} - -func TestFindNestedFloats(t *testing.T) { - a := buildConfig() - for _, tc := range []struct { - input string - output float64 - }{ - {"C", 1.1}, - {"D.H", 2.2}, - {"D.I.O", 3.3}, - {"E.L", 4.4}, - } { - v, err := findNestedElement(tc.input, &a) - if err != nil { - t.Fatalf("Did not expect error. Got: %v", err) - } - - // Floating point comparison is tricky. 
- if !checkFloats(tc.output, v.Float()) { - t.Fatalf("Expected: %v, got %v", tc.output, v.Float()) - } - } -} - -func TestSetElement(t *testing.T) { - for _, tc := range []struct { - path string - newval string - checker func(testConfig) bool - }{ - {"A", "newstring", func(t testConfig) bool { return t.A == "newstring" }}, - {"B", "13", func(t testConfig) bool { return t.B == 13 }}, - {"C", "3.14", func(t testConfig) bool { return checkFloats(float64(t.C), 3.14) }}, - {"D.F", "fizzbuzz", func(t testConfig) bool { return t.D.F == "fizzbuzz" }}, - {"D.G", "4", func(t testConfig) bool { return t.D.G == 4 }}, - {"D.H", "7.3", func(t testConfig) bool { return checkFloats(float64(t.D.H), 7.3) }}, - {"E.J", "otherstring", func(t testConfig) bool { return t.E.J == "otherstring" }}, - {"E.K", "17", func(t testConfig) bool { return t.E.K == 17 }}, - {"E.L", "1.234", func(t testConfig) bool { return checkFloats(float64(t.E.L), 1.234) }}, - {"D.I.P", "true", func(t testConfig) bool { return t.D.I.P == true }}, - {"D.I.P", "false", func(t testConfig) bool { return t.D.I.P == false }}, - {"D.I.Q", "11.22.33.44", func(t testConfig) bool { return t.D.I.Q.Equal(net.ParseIP("11.22.33.44")) }}, - {"D.I.R", "7-11", func(t testConfig) bool { return t.D.I.R.Base == 7 && t.D.I.R.Size == 5 }}, - {"D.I.S", "a,b", func(t testConfig) bool { return reflect.DeepEqual(t.D.I.S, []string{"a", "b"}) }}, - {"D.I.T", "foo", func(t testConfig) bool { return t.D.I.T == "foo" }}, - {"D.I.U", "11.22.0.0/16", func(t testConfig) bool { return t.D.I.U.String() == "11.22.0.0/16" }}, - {"D.I.V", "5s", func(t testConfig) bool { return t.D.I.V == 5*time.Second }}, - } { - a := buildConfig() - if err := FindAndSet(tc.path, &a, tc.newval); err != nil { - t.Fatalf("Error setting value: %v", err) - } - if !tc.checker(a) { - t.Fatalf("Error, values not correct: %v, %s, %s", a, tc.newval, tc.path) - } - - } -} diff --git a/pkg/util/utils.go b/pkg/util/utils.go index fdd38f4f35..37de6085cd 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -38,12 +38,13 @@ func CalculateSizeInMB(humanReadableSize string) (int, error) { if err == nil { humanReadableSize += "mb" } - size, err := units.FromHumanSize(humanReadableSize) + // parse the size suffix binary instead of decimal so that 1G -> 1024MB instead of 1000MB + size, err := units.RAMInBytes(humanReadableSize) if err != nil { return 0, fmt.Errorf("FromHumanSize: %v", err) } - return int(size / units.MB), nil + return int(size / units.MiB), nil } // GetBinaryDownloadURL returns a suitable URL for the platform diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index f1fe867c48..55392d7ebc 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -51,6 +51,7 @@ func TestCalculateSizeInMB(t *testing.T) { {"1024KB", 1}, {"1024mb", 1024}, {"1024b", 0}, + {"1g", 1024}, } for _, tt := range testData { @@ -59,7 +60,7 @@ func TestCalculateSizeInMB(t *testing.T) { t.Fatalf("unexpected err: %v", err) } if number != tt.expectedNumber { - t.Fatalf("Expected '%d'' but got '%d'", tt.expectedNumber, number) + t.Fatalf("Expected '%d' but got '%d' from size '%s'", tt.expectedNumber, number, tt.size) } } } diff --git a/site/assets/scss/_variables_project.scss b/site/assets/scss/_variables_project.scss index 01a924faa8..768b6b8662 100644 --- a/site/assets/scss/_variables_project.scss +++ b/site/assets/scss/_variables_project.scss @@ -3,16 +3,18 @@ // minikube colors, taken from the logo $mk-dark: #306EE5; $mk-medium: #1FC3CF; -$mk-light:#C7EAEC; +$mk-midlight: #e8f3f3; +$mk-light:#CCEBEC; 
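For context on the `CalculateSizeInMB` change above: a minimal standalone sketch (assuming the `github.com/docker/go-units` package that `pkg/util/utils.go` already imports) showing why `RAMInBytes` replaces `FromHumanSize` — the former treats size suffixes as binary, so `1g` yields 1024 MB rather than 1000 MB, matching the new `{"1g", 1024}` test case:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Old behaviour: decimal parsing, 1g -> 1,000,000,000 bytes -> 1000 MB.
	dec, _ := units.FromHumanSize("1g")
	fmt.Println(dec / units.MB) // 1000

	// New behaviour: binary parsing, 1g -> 1,073,741,824 bytes -> 1024 MiB.
	bin, _ := units.RAMInBytes("1g")
	fmt.Println(bin / units.MiB) // 1024
}
```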
+$mk-verylight: lighten($mk-light, 15%); $mk-verydark: darken($mk-dark, 15%); // bootstrap colors $dark: #403F4C !default; $blue: #72A1E5 !default; $orange: #BA5A31 !default; -$gray-100: #f8f9fa !default; +$gray-100: #f6f6f8 !default; $gray-200: #eee !default; -$gray-300: #dee2e6 !default; +$gray-300: #dedeef !default; $gray-400: #ccc !default; $gray-500: #adb5bd !default; $gray-600: #888 !default; @@ -31,7 +33,30 @@ $danger: #ED6A5A !default; $white: #fff !default; $light: $mk-light; $medium: $mk-medium; +$spacer: 0.5em; +body, p { + font-family: 'Lora', serif !important; +} + + +h1,h2,h3,h4,h5 { + font-family: 'Open Sans', sans-serif !important; +} + +.step { + margin-top: 2em; + font-family: 'Open Sans', sans-serif !important; + + .text-primary { + color: $mk-dark !important; + } + + .fa-stack { + color: $mk-midlight !important; + margin-right: 0.4em; + } +} // Navigation bar $navbar-dark-color: $mk-dark; @@ -151,15 +176,41 @@ section.td-box--height-auto { } + +// Allow code tags to span most of a window length (default is 80%) +pre { + max-width: 99% !important; + font-family: 'Inconsolata', monospace !important; + font-size: 13px !important; +} + div.td-content { - // Allow code tags to span most of a window length (default is 80%) - pre { - max-width: 99% !important; - } h2 { - font-size: 1.4rem !important; + font-size: 1.6rem !important; } + h3 { font-size: 1.3rem !important; } + + .highlight { + padding: 0.5em !important; + margin-top: 1.25em; + margin-bottom: 1.25em; + } } + +.td-sidebar { + background-color: $gray-100 !important; +} + +.td-search-input { + background-color: #fff !important; +} + +// make search bar wider https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr +.popover.offline-search-result { + background-color: $gray-200 !important; + max-width: 460px; +} + diff --git a/site/config.toml b/site/config.toml index 091e83fe83..7848140bf2 100644 --- a/site/config.toml +++ b/site/config.toml @@ -33,39 +33,20 @@ pygmentsStyle = "tango" [permalinks] blog = "/:section/:year/:month/:day/:slug/" -[module] - [[module.mounts]] - source = "../deploy/addons/gvisor/" - target = "content/gvisor/" - [[module.mounts]] - source = "../deploy/addons/helm-tiller/" - target = "content/helm-tiller/" - [[module.mounts]] - source = "../deploy/addons/istio/" - target = "content/istio/" - [[module.mounts]] - source = "../deploy/addons/ingress-dns/" - target = "content/ingress-dns/" - [[module.mounts]] - source = "../deploy/addons/storage-provisioner-gluster/" - target = "content/storage-provisioner-gluster/" - [[module.mounts]] - source = "../deploy/addons/layouts/" - target = "layouts" +[markup] + [markup.highlight] + codeFences = true + hl_Lines = "" + lineNoStart = 1 + lineNos = false + lineNumbersInTable = true + noClasses = true + style = "vs" + tabWidth = 4 - [[module.mounts]] - source = "content/en" - target = "content" - [[module.mounts]] - source = "layouts" - target = "layouts" - -## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday -[blackfriday] -plainIDAnchors = true -hrefTargetBlank = true -angledQuotes = false -latexDashes = true +# allow html in markdown +[markup.goldmark.renderer] + unsafe=true # Image processing configuration. 
[imaging] @@ -95,7 +76,7 @@ weight = 1 [params] copyright = "The Kubernetes Authors -- " # The latest release of minikube -latest_release = "1.8.1" +latest_release = "1.9.1" privacy_policy = "" @@ -112,7 +93,10 @@ github_project_repo = "" github_subdir = "site" # Google Custom Search Engine ID. Remove or comment out to disable search. -gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" +# gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" + +# enabling local search https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr +offlineSearch = true # User interface configuration [params.ui] @@ -159,3 +143,22 @@ no = 'Sorry to hear that. Please - .imagesizing { - width:auto; - text-align:center; - padding:10px; - } - img { - max-width:100%; - height:auto; - } - - - -
-
-	{{< blocks/lead >}}
-
-	Instantly productive.
-
-A single command away from reproducing your production environment, from the comfort of localhost.
-
-	Highlights
-
-	• Always supports the latest Kubernetes release (as well as previous versions)
-	• Cross-platform (Linux, macOS, Windows)
-	• Infinitely configurable local development environment
-	• Multiple container runtimes (crio, containerd, docker, gvisor)
-	• Provides a Docker API endpoint
-
- - +This page has moved to /docs + {{< /blocks/lead >}} -{{< blocks/section color="white" >}} -{{% blocks/feature icon="fa-star" title="Developer focused" %}} -- [LoadBalancer emulation](https://minikube.sigs.k8s.io/docs/tasks/loadbalancer/) -- [Addons Marketplace](https://minikube.sigs.k8s.io/docs/tasks/addons/) -- [Integrated Dashboard](https://minikube.sigs.k8s.io/docs/tasks/dashboard/) -- [GPU support](https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/) -- Reusable Docker daemon -{{% /blocks/feature %}} - -{{% blocks/feature icon="fa-cogs" title="Infinitely configurable" %}} -- Any container runtime -- Any Kubernetes version -- Any apiserver, kubelet, controller, etcd, proxy, or scheduler option -{{% /blocks/feature %}} - -{{% blocks/feature icon="fa-thumbs-up" title="Cross-platform" %}} -- Bare-metal -- HyperKit -- Hyper-V -- KVM -- VirtualBox -- VMware -{{% /blocks/feature %}} -{{< /blocks/section >}} - -{{< blocks/section color="light" >}} -{{% blocks/feature icon="fab fa-slack" title="Chat with us on Slack" %}} - - - - -The #minikube channel is ready to answer your questions. -{{% /blocks/feature %}} - -{{% blocks/feature icon="fab fa-github" title="Contributions welcome" %}} - - - - -Want to join the fun on Github? New users are always welcome! -{{% /blocks/feature %}} - -{{% blocks/feature icon="fas fa-bullhorn" title="Make your voice heard" %}} - - - - -Have time for 5 quick questions? We would love to hear from you. - -{{% /blocks/feature %}} -{{< /blocks/section >}} diff --git a/site/content/en/blog/_index.md b/site/content/en/blog/_index.md deleted file mode 100644 index 59c4cdd84b..0000000000 --- a/site/content/en/blog/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Docsy Blog" -linkTitle: "News" -menu: - main: - weight: 30 ---- - - -This is the **blog** section. It has two categories: News and Releases. - -Files in these directories will be listed in reverse chronological order. - diff --git a/site/content/en/blog/news/_index.md b/site/content/en/blog/news/_index.md deleted file mode 100644 index 13d25eaa45..0000000000 --- a/site/content/en/blog/news/_index.md +++ /dev/null @@ -1,8 +0,0 @@ - ---- -title: "News About Docsy" -linkTitle: "News" -weight: 20 ---- - - diff --git a/site/content/en/blog/news/new-website.md b/site/content/en/blog/news/new-website.md deleted file mode 100755 index 4718f0afe2..0000000000 --- a/site/content/en/blog/news/new-website.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "New website launched" -date: 2019-08-13 -description: > - About the new minikube website ---- - -As you may have noticed, minikube has a brand new website. - -As part of our [2019 roadmap](https://minikube.sigs.k8s.io/docs/contributing/roadmap/), we identified the need for a user-focused website for documentation, and this is it. It's based on the [Hugo Website Framework](https://gohugo.io/) and uses the [Docsy Templates for Technical Documentation](https://github.com/google/docsy). - -We have more work to do with organizing the documentation and making it more useful, but we hope you are able to enjoy the fruits of our labor. PR's welcome! 
diff --git a/site/content/en/blog/releases/_index.md b/site/content/en/blog/releases/_index.md deleted file mode 100644 index b1d9eb4ff3..0000000000 --- a/site/content/en/blog/releases/_index.md +++ /dev/null @@ -1,8 +0,0 @@ - ---- -title: "New Releases" -linkTitle: "Releases" -weight: 20 ---- - - diff --git a/site/content/en/blog/releases/v1.13.1.md b/site/content/en/blog/releases/v1.13.1.md deleted file mode 100755 index e62fe052bd..0000000000 --- a/site/content/en/blog/releases/v1.13.1.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "v1.3.1" -linkTitle: "v1.3.1 released" -date: 2019-08-13 -description: > - Release notes for v1.3.1 ---- - -minikube v1.3.1 is now available, which addresses regressions found in the v1.3.0 release. Here are the [changes](https://raw.githubusercontent.com/kubernetes/minikube/v1.3.1/CHANGELOG.md) - -* Update code references to point to new documentation site [#5052](https://github.com/kubernetes/minikube/pull/5052) -* Localization support for help text [#4814](https://github.com/kubernetes/minikube/pull/4814) -* Upgrade cheggaaa/pb from v1.0.27 to v3.0.1 [#5025](https://github.com/kubernetes/minikube/pull/5025) -* Restore --disable-driver-mounts flag [#5026](https://github.com/kubernetes/minikube/pull/5026) -* Fixed the template for dashboard output [#5004](https://github.com/kubernetes/minikube/pull/5004) -* Use a temp dest to atomically download the iso [#5000](https://github.com/kubernetes/minikube/pull/5000) - -Thank you to our merry band of contributors for assembling this last minute bug fix release. - -- Jituri, Pranav -- Medya Ghazizadeh -- Pranav Jituri -- Ramiro Berrelleza -- Sharif Elgamal -- Thomas Strömberg -- josedonizetti diff --git a/site/content/en/docs/Concepts/_index.md b/site/content/en/docs/Concepts/_index.md deleted file mode 100644 index 2b874a5243..0000000000 --- a/site/content/en/docs/Concepts/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "Concepts" -linkTitle: "Concepts" -weight: 4 -description: > - Concepts that users and contributors should be aware of. ---- diff --git a/site/content/en/docs/Contributing/translations.md b/site/content/en/docs/Contributing/translations.md deleted file mode 100644 index 40f7cfdee1..0000000000 --- a/site/content/en/docs/Contributing/translations.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Translations" -date: 2019-09-30 -weight: 3 -description: > - How to add translations ---- - -All translations are stored in the top-level `translations` directory. - -### Adding Translations To an Existing Language -* Run `make extract` to make sure all strings are up to date -* Add translated strings to the appropriate json files in the 'translations' - directory. - -### Adding a New Language -* Add a new json file with the locale code of the language you want to add - translations for, e.g. en for English. -* Run `make extract` to populate that file with the strings to translate in json - form. -* Add translations to as many strings as you'd like. diff --git a/site/content/en/docs/Overview/_index.md b/site/content/en/docs/Overview/_index.md deleted file mode 100644 index 5094d5fe67..0000000000 --- a/site/content/en/docs/Overview/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "Overview" -linkTitle: "Overview" -weight: 1 -description: > - What is it? ---- - -minikube implements a local Kubernetes cluster on macOS, Linux, and Windows. 
- -minikube's [primary goals](https://minikube.sigs.k8s.io/docs/concepts/principles/) are to be the best tool for local Kubernetes application development and to support all Kubernetes features that fit. - -minikube runs the latest stable release of Kubernetes, with support for standard Kubernetes features like: - -* [LoadBalancer](https://minikube.sigs.k8s.io/docs/tasks/loadbalancer/) - using `minikube tunnel` -* Multi-cluster - using `minikube start -p ` -* NodePorts - using `minikube service` -* [Persistent Volumes](https://minikube.sigs.k8s.io/docs/reference/persistent_volumes/) -* Ingress -* RBAC -* [Dashboard](https://minikube.sigs.k8s.io/docs/tasks/dashboard/) - `minikube dashboard` -* [Container runtimes](https://minikube.sigs.k8s.io/docs/reference/runtimes/) - `start --container-runtime` -* [Configure apiserver and kubelet options](https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/) via command-line flags - -As well as developer-friendly features: - -* [Addons](https://minikube.sigs.k8s.io/docs/tasks/addons/) - a marketplace for developers to share configurations for running services on minikube -* [GPU support](https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/) - for machine learning -* [Filesystem mounts](https://minikube.sigs.k8s.io/docs/tasks/mount/) -* Automatic failure analysis - -## Why do I want it? - -If you would like to develop Kubernetes applications: - -* locally -* offline -* using the latest version of Kubernetes - -Then minikube is for you. - -* **What is it good for?** Developing local Kubernetes applications -* **What is it not good for?** Production Kubernetes deployments - -## Where should I go next? - -* [Getting Started](/docs/start/): Get started with minikube -* [Examples](/docs/examples/): Check out some minikube examples! - -📣😀 **Please fill out our [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A)** so that we can learn how & why you use minikube, and what improvements we should make. Thank you! 💃🏽🎉 diff --git a/site/content/en/docs/Reference/Commands/addons.md b/site/content/en/docs/Reference/Commands/addons.md deleted file mode 100644 index e87deb9216..0000000000 --- a/site/content/en/docs/Reference/Commands/addons.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "addons" -linkTitle: "addons" -weight: 1 -date: 2019-08-01 -description: > - Modifies minikube addons files using subcommands like "minikube addons enable dashboard" ---- - -## Overview - -* **configure**: Configures the addon w/ADDON_NAME within minikube -* **disable**: Disables the addon w/ADDON_NAME within minikube -* **enable**: Enables the addon w/ADDON_NAME within minikube -* **list**: Lists all available minikube addons as well as their current statuses (enabled/disabled) -* **open**: Opens the addon w/ADDON_NAME within minikube - -## minikube addons configure - -Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list - -``` -minikube addons configure ADDON_NAME [flags] -``` - -## minikube addons disable - -Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). For a list of available addons use: minikube addons list - -``` -minikube addons disable ADDON_NAME [flags] -``` - -## minikube addons enable - -Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). 
For a list of available addons use: minikube addons list - -``` -minikube addons enable ADDON_NAME [flags] -``` - -or - -``` -minikube start --addons ADDON_NAME [flags] -``` - -## minikube addons list - -Lists all available minikube addons as well as their current statuses (enabled/disabled) - -``` -minikube addons list [flags] -``` - -### Options - -``` - -h, --help help for list - -o, --output string minikube addons list --output OUTPUT. json, list (default "list") -``` - -## minikube addons open - -Opens the addon w/ADDON_NAME within minikube (example: minikube addons open dashboard). For a list of available addons use: minikube addons list - -``` -minikube addons open ADDON_NAME [flags] -``` - -### Options - -``` - --format string Format to output addons URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}") - -h, --help help for open - --https Open the addons URL with https instead of http - --interval int The time interval for each check that wait performs in seconds (default 6) - --url Display the kubernetes addons URL in the CLI instead of opening it in the default browser - --wait int Amount of time to wait for service in seconds (default 20) -``` - - -## Options inherited from parent commands - -``` - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -``` diff --git a/site/content/en/docs/Reference/Commands/cache.md b/site/content/en/docs/Reference/Commands/cache.md deleted file mode 100644 index 039eda9940..0000000000 --- a/site/content/en/docs/Reference/Commands/cache.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "cache" -linkTitle: "cache" -weight: 1 -date: 2019-08-01 -description: > - Add or delete an image from the local cache. ---- - - -## minikube cache add - -Add an image to local cache. - -``` -minikube cache add [flags] -``` - -## minikube cache delete - -Delete an image from the local cache. - -``` -minikube cache delete [flags] -``` - -## minikube cache list - -List all available images from the local cache. - -``` -minikube cache list [flags] -``` - -### Options - -``` - --format string Go template format string for the cache list output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/ - For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate (default "{{.CacheImage}}\n") - -h, --help help for list -``` - -## minikube cache reload - -reloads images previously added using the 'cache add' subcommand - -``` -minikube cache reload [flags] -``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Commands/config.md b/site/content/en/docs/Reference/Commands/config.md deleted file mode 100644 index 8694d6cb0d..0000000000 --- a/site/content/en/docs/Reference/Commands/config.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: "config" -linkTitle: "config" -weight: 1 -date: 2019-08-01 -description: > - Modify minikube config ---- - -### Overview - -config modifies minikube config files using subcommands like "minikube config set driver kvm" - -Configurable fields: - - * driver - * container-runtime - * feature-gates - * v - * cpus - * disk-size - * host-only-cidr - * memory - * log_dir - * kubernetes-version - * iso-url - * WantUpdateNotification - * ReminderWaitPeriodInHours - * WantReportError - * WantReportErrorPrompt - * WantKubectlDownloadMsg - * WantNoneDriverWarning - * profile - * bootstrapper - * ShowDriverDeprecationNotification - * ShowBootstrapperDeprecationNotification - * insecure-registry - * hyperv-virtual-switch - * disable-driver-mounts - * cache - * embed-certs - * native-ssh - - -### subcommands - -- **get**: Gets the value of PROPERTY_NAME from the minikube config file - -## minikube config get - -Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables. - -### Usage - -``` -minikube config get PROPERTY_NAME [flags] -``` - -## minikube config set - -Sets the PROPERTY_NAME config value to PROPERTY_VALUE - These values can be overwritten by flags or environment variables at runtime. - -### Usage - -``` -minikube config set PROPERTY_NAME PROPERTY_VALUE [flags] -``` - -## minikube config unset - -unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables - -### Usage - -``` -minikube config unset PROPERTY_NAME [flags] -``` - - -## minikube config view - -### Overview - -Display values currently set in the minikube config file. - -### Usage - -``` -minikube config view [flags] -``` - -### Options - -``` - --format string Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ - For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate (default "- {{.ConfigKey}}: {{.ConfigValue}}\n") - -h, --help help for view -``` - -### Options inherited from parent commands - -``` - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -``` diff --git a/site/content/en/docs/Reference/Commands/service.md b/site/content/en/docs/Reference/Commands/service.md deleted file mode 100644 index 8a7a8b164f..0000000000 --- a/site/content/en/docs/Reference/Commands/service.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: "service" -linkTitle: "service" -weight: 1 -date: 2019-08-01 -description: > - Gets the Kubernetes URL(s) for the specified service in your local cluster ---- - -### Overview - -Gets the Kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time. - -### Usage - -``` -minikube service [flags] SERVICE -``` - -### Subcommands - -- **list**: Lists the URLs for the services in your local cluster - -### Options - -``` - --format string Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}") - -h, --help help for service - --https Open the service URL with https instead of http - --interval int The initial time interval for each check that wait performs in seconds (default 6) - -n, --namespace string The service namespace (default "default") - --url Display the kubernetes service URL in the CLI instead of opening it in the default browser - --wait int Amount of time to wait for a service in seconds (default 20) -``` - -### Options inherited from parent commands - -``` - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -``` - -## minikube service list - -Lists the URLs for the services in your local cluster - -``` -minikube service list [flags] -``` - -### Options - -``` - -h, --help help for list - -n, --namespace string The services namespace -``` - diff --git a/site/content/en/docs/Reference/Commands/ssh.md b/site/content/en/docs/Reference/Commands/ssh.md deleted file mode 100644 index 54d9649233..0000000000 --- a/site/content/en/docs/Reference/Commands/ssh.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "ssh" -linkTitle: "ssh" -weight: 1 -date: 2019-08-01 -description: > - Log into or run a command on a machine with SSH; similar to 'docker-machine ssh' ---- - - -### Usage - -``` -minikube ssh [flags] -``` - -### Options - -``` - -h, --help help for ssh - --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. 
(default true) -``` diff --git a/site/content/en/docs/Reference/Commands/status.md b/site/content/en/docs/Reference/Commands/status.md deleted file mode 100644 index d8c636ccf7..0000000000 --- a/site/content/en/docs/Reference/Commands/status.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "status" -linkTitle: "status" -weight: 1 -date: 2019-08-01 -description: > - Gets the status of a local Kubernetes cluster ---- - - -### Overview - -Gets the status of a local Kubernetes cluster. - Exit status contains the status of minikube's VM, cluster and Kubernetes encoded on it's bits in this order from right to left. - Eg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for Kubernetes NOK) - -### Usage - -``` -minikube status [flags] -``` - -### Options - -``` - -f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ - - For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "host: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\n") - - -h, --help help for status - -o, --output string minikube status --output OUTPUT. json, text (default "text") -``` - -### Options inherited from parent commands - -``` - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - ``` - -### Options inherited from parent commands - -``` - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -``` diff --git a/site/content/en/docs/Reference/Commands/update-check.md b/site/content/en/docs/Reference/Commands/update-check.md deleted file mode 100644 index 17a666bffd..0000000000 --- a/site/content/en/docs/Reference/Commands/update-check.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "update-check" -linkTitle: "update-check" -weight: 1 -date: 2019-08-01 -description: > - Print current and latest version number ---- - -### Usage - -``` -minikube update-check [flags] -``` diff --git a/site/content/en/docs/Reference/Configuration/_index.md b/site/content/en/docs/Reference/Configuration/_index.md deleted file mode 100644 index 1615c60d70..0000000000 --- a/site/content/en/docs/Reference/Configuration/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Configuration" -linkTitle: "Configuration" -weight: 1 -date: 2019-08-01 -description: > - minikube configuration reference ---- \ No newline at end of file diff --git a/site/content/en/docs/Reference/Configuration/kubernetes.md b/site/content/en/docs/Reference/Configuration/kubernetes.md deleted file mode 100644 index 8781b3f710..0000000000 --- a/site/content/en/docs/Reference/Configuration/kubernetes.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Kubernetes" -linkTitle: "Kubernetes" -weight: 3 -date: 2019-08-01 -description: > - Kubernetes configuration reference ---- - -minikube allows users to configure the Kubernetes components with arbitrary values. To use this feature, you can use the `--extra-config` flag on the `minikube start` command. - -This flag is repeated, so you can pass it several times with several different values to set multiple options. - -## Selecting a Kubernetes version - -By default, minikube installs the latest stable version of Kubernetes that was available at the time of the minikube release. You may select a different Kubernetes release by using the `--kubernetes-version` flag, for example: - -`minikube start --kubernetes-version=v1.11.10` - -If you omit this flag, minikube will upgrade your cluster to the default version. If you would like to pin to a specific Kubernetes version across clusters, restarts, and upgrades to minikube, use: - -`minikube config set kubernetes-version v1.11.0` - -minikube follows the [Kubernetes Version and Version Skew Support Policy](https://kubernetes.io/docs/setup/version-skew-policy/), so we guarantee support for the latest build for the last 3 minor Kubernetes releases. When practical, minikube aims for the last 6 minor releases so that users can emulate legacy environments. - -As of September 2019, this means that minikube supports and actively tests against the latest builds of: - -* v1.16 (default) -* v1.15 -* v1.14 -* v1.13 -* v1.12 -* v1.11 (best effort) - -For more up to date information, see `OldestKubernetesVersion` and `NewestKubernetesVersion` in [constants.go](https://github.com/kubernetes/minikube/blob/master/pkg/minikube/constants/constants.go) - -## Modifying Kubernetes defaults - -The kubeadm bootstrapper can be configured by the `--extra-config` flag on the `minikube start` command. It takes a string of the form `component.key=value` where `component` is one of the strings - -* kubeadm -* kubelet -* apiserver -* controller-manager -* scheduler - -and `key=value` is a flag=value pair for the component being configured. 
For example, - -```shell -minikube start --extra-config=apiserver.v=10 --extra-config=kubelet.max-pods=100 -``` - -For instance, to allow Kubernetes to launch on an unsupported Docker release: - -```shell -minikube start --extra-config=kubeadm.ignore-preflight-errors=SystemVerification -``` diff --git a/site/content/en/docs/Reference/Configuration/minikube.md b/site/content/en/docs/Reference/Configuration/minikube.md deleted file mode 100644 index 8fc3c39438..0000000000 --- a/site/content/en/docs/Reference/Configuration/minikube.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: "minikube" -linkTitle: "minikube" -weight: 2 -date: 2019-08-01 -description: > - minikube configuration reference ---- - -## Flags - -Most minikube configuration is done via the flags interface. To see which flags are possible for the start command, run: - -```shell -minikube start --help -``` - -For example: - -```shell -minikube start --apiserver-port 9999 -``` - -Many of these flags are also available to be set via persistent configuration or environment variables. -While most flags are applicable to any command, some are globally scoped: - -``` -Flags: - --alsologtostderr log to standard error as well as files - -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") - -h, --help help for minikube - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --logtostderr log to standard error instead of files - -p, --profile string The name of the minikube VM being used. - This can be modified to allow for multiple minikube instances to be run independently (default "minikube") - --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -``` - -## Persistent Configuration - -minikube allows users to persistently store new default values to be used across all profiles, using the `minikube config` command. This is done providing a property name, and a property value. 
- -### Listing config properties - -```shell -minikube config -``` - -Example: - -```shell -Configurable fields: - - * driver - * feature-gates - * v - * cpus - * disk-size - * host-only-cidr - * memory - * log_dir - * kubernetes-version - * iso-url - * WantUpdateNotification - * ReminderWaitPeriodInHours - * WantReportError - * WantReportErrorPrompt - * WantKubectlDownloadMsg - * WantNoneDriverWarning - * profile - * bootstrapper - * ShowDriverDeprecationNotification - * ShowBootstrapperDeprecationNotification - * dashboard - * addon-manager - * default-storageclass - * efk - * ingress - * registry - * registry-creds - * freshpod - * default-storageclass - * storage-provisioner - * storage-provisioner-gluster - * metrics-server - * nvidia-driver-installer - * nvidia-gpu-device-plugin - * logviewer - * gvisor - * hyperv-virtual-switch - * disable-driver-mounts - * cache - * embed-certs -``` - -### Listing your property overrides - -```shell -minikube config view -``` - -Example output: - -```shell -- memory: 4096 -- registry: true -- driver: vmware -- dashboard: true -- gvisor: true -``` - -### Setting a new property override - - -```shell -minikube config set -``` - -For example: - -```shell -minikube config set driver hyperkit -``` - -## Environment Configuration - -### Config variables - -minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`. - -For example the `minikube start --iso-url="$ISO_URL"` flag can also be set by setting the `MINIKUBE_ISO_URL="$ISO_URL"` environment variable. - -### Other variables - -Some features can only be accessed by environment variables, here is a list of these features: - -* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration - -* **MINIKUBE_IN_STYLE** - (bool) manually sets whether or not emoji and colors should appear in minikube. Set to false or 0 to disable this feature, true or 1 to force it to be turned on. - -* **MINIKUBE_WANTUPDATENOTIFICATION** - (bool) sets whether the user wants an update notification for new minikube versions - -* **MINIKUBE_REMINDERWAITPERIODINHOURS** - (int) sets the number of hours to check for an update notification - -* **CHANGE_MINIKUBE_NONE_USER** - (bool) automatically change ownership of ~/.minikube to the value of $SUDO_USER - -* **MINIKUBE_ENABLE_PROFILING** - (int, `1` enables it) enables trace profiling to be generated for minikube - -### Making environment variables persistent - -To make the exported variables persistent: - -* Linux and macOS: Add these declarations to `~/.bashrc` or wherever your shells environment variables are stored. 
-* Windows: Add these declarations via [system settings](https://support.microsoft.com/en-au/help/310519/how-to-manage-environment-variables-in-windows-xp) or using [setx](https://stackoverflow.com/questions/5898131/set-a-persistent-environment-variable-from-cmd-exe) - -#### Example: Disabling emoji - -```shell -export MINIKUBE_IN_STYLE=false -minikube start -``` diff --git a/site/content/en/docs/Reference/Drivers/_index.md b/site/content/en/docs/Reference/Drivers/_index.md deleted file mode 100644 index c1c0adb612..0000000000 --- a/site/content/en/docs/Reference/Drivers/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Drivers" -linkTitle: "Drivers" -weight: 8 -date: 2017-01-05 -date: 2018-08-05 -description: > - Configuring various minikube drivers ---- -minikube uses the Docker Machine library to provide a consistent way to interact with hypervisors. While most drivers are linked directly into the minikube program, some may require an additional binary to be downloaded due to technical or legal restrictions. diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md deleted file mode 100644 index 164691d267..0000000000 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "docker" -linkTitle: "docker" -weight: 3 -date: 2020-02-05 -description: > - Docker driver (EXPERIMENTAL) ---- - -## Overview - -The Docker driver is an experimental VM-free driver that ships with minikube v1.7. - -This driver was inspired by the [kind project](https://kind.sigs.k8s.io/), and uses a modified version of its base image. - -## Special features - -No hypervisor required when run on Linux. - -## Limitations - -As an experimental driver, not all commands are supported on all platforms. Notably: `mount,` `service`, `tunnel`, and others. Most of these limitations will be addressed by minikube v1.8 (March 2020) - -## Issues - -* [Full list of open 'kic-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fkic-driver) - -## Troubleshooting - -* Run `minikube start --alsologtostderr -v=1` to debug crashes diff --git a/site/content/en/docs/Reference/Networking/_index.md b/site/content/en/docs/Reference/Networking/_index.md deleted file mode 100644 index b1add3ea29..0000000000 --- a/site/content/en/docs/Reference/Networking/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Networking" -linkTitle: "Networking" -weight: 6 -date: 2018-08-01 -description: > - How minikube interacts with networks. ---- - -Since minikube runs everything from within a VM, networking can get fairly complicated. \ No newline at end of file diff --git a/site/content/en/docs/Reference/Networking/dns.md b/site/content/en/docs/Reference/Networking/dns.md deleted file mode 100644 index 6c44114b7b..0000000000 --- a/site/content/en/docs/Reference/Networking/dns.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "DNS Domain" -linkTitle: "DNS Domain" -weight: 6 -date: 2019-10-09 -description: > - Use configured DNS domain in bootstrapper kubeadm ---- - -minikube by default uses **cluster.local** if none is specified via the start flag --dns-domain. The configuration file used by kubeadm are found inside **/var/tmp/minikube/kubeadm.yaml** directory inside minikube. - -Default DNS configuration will look like below - -``` -apiVersion: kubeadm.k8s.io/v1beta1 -kind: InitConfiguration -localAPIEndpoint: -...... -...... ---- -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -..... -..... 
-kubernetesVersion: v1.16.0 -networking: - dnsDomain: cluster.local - podSubnet: "" - serviceSubnet: 10.96.0.0/12 ---- -``` - -To change the dns pass the value when starting minikube - -``` -minikube start --dns-domain bla.blah.blah -``` - -the dns now changed to bla.blah.blah - -``` -apiVersion: kubeadm.k8s.io/v1beta1 -kind: InitConfiguration -localAPIEndpoint: -...... -...... ---- -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -..... -..... -kubernetesVersion: v1.16.0 -networking: - dnsDomain: bla.blah.blah - podSubnet: "" - serviceSubnet: 10.96.0.0/12 ---- -``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Networking/vpn.md b/site/content/en/docs/Reference/Networking/vpn.md deleted file mode 100644 index 94392a3980..0000000000 --- a/site/content/en/docs/Reference/Networking/vpn.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Host VPN" -linkTitle: "Host VPN" -weight: 6 -date: 2019-08-01 -description: > - Using minikube on a host with a VPN installed ---- - -minikube requires access from the host to the following IP ranges: - -* **192.168.99.0/24**: Used by the minikube VM. Configurable for some hypervisors via `--host-only-cidr` -* **192.168.39.0/24**: Used by the minikube kvm2 driver. -* **10.96.0.0/12**: Used by service cluster IP's. Configurable via `--service-cluster-ip-range` - -Unfortunately, many VPN configurations route packets to these destinations through an encrypted tunnel, rather than allowing the packets to go to the minikube VM. - -### Possible workarounds - -1. If you have access, whitelist the above IP ranges in your VPN software -2. In your VPN software, select an option similar to "Allow local (LAN) access when using VPN" [(Cisco VPN example)](https://superuser.com/questions/987150/virtualbox-guest-os-through-vpn) -3. You may have luck selecting alternate values to the `--host-only-cidr` and `--service-cluster-ip-range` flags. -4. Turn off the VPN diff --git a/site/content/en/docs/Reference/_index.md b/site/content/en/docs/Reference/_index.md deleted file mode 100644 index bfe11dd2b6..0000000000 --- a/site/content/en/docs/Reference/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Reference" -linkTitle: "Reference" -weight: 5 -description: > - Low level reference docs ---- - diff --git a/site/content/en/docs/Reference/environment_variables.md b/site/content/en/docs/Reference/environment_variables.md deleted file mode 100644 index 7a02d204d9..0000000000 --- a/site/content/en/docs/Reference/environment_variables.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Environment Variables" -linkTitle: "Environment Variables" -weight: 6 -date: 2019-08-01 ---- - -## Config option variables - -minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`. - -For example the `minikube start --iso-url="$ISO_URL"` flag can also be set by setting the `MINIKUBE_ISO_URL="$ISO_URL"` environment variable. - -## Other variables - -Some features can only be accessed by environment variables, here is a list of these features: - -* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration - -* **MINIKUBE_IN_STYLE** - (bool) manually sets whether or not emoji and colors should appear in minikube. Set to false or 0 to disable this feature, true or 1 to force it to be turned on. 
- -* **MINIKUBE_WANTUPDATENOTIFICATION** - (bool) sets whether the user wants an update notification for new minikube versions - -* **MINIKUBE_REMINDERWAITPERIODINHOURS** - (int) sets the number of hours to check for an update notification - -* **CHANGE_MINIKUBE_NONE_USER** - (bool) automatically change ownership of ~/.minikube to the value of $SUDO_USER - -* **MINIKUBE_ENABLE_PROFILING** - (int, `1` enables it) enables trace profiling to be generated for minikube - - -## Example: Disabling emoji - -```shell -export MINIKUBE_IN_STYLE=false -minikube start -``` - -## Making values persistent - -To make the exported variables persistent across reboots: - -* Linux and macOS: Add these declarations to `~/.bashrc` or wherever your shells environment variables are stored. -* Windows: Add these declarations via [system settings](https://support.microsoft.com/en-au/help/310519/how-to-manage-environment-variables-in-windows-xp) or using [setx](https://stackoverflow.com/questions/5898131/set-a-persistent-environment-variable-from-cmd-exe) - diff --git a/site/content/en/docs/Reference/runtimes.md b/site/content/en/docs/Reference/runtimes.md deleted file mode 100644 index 5973040ff7..0000000000 --- a/site/content/en/docs/Reference/runtimes.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Container Runtimes" -linkTitle: "Container Runtimes" -weight: 6 -date: 2019-08-01 -description: > - Available container runtimes ---- - -### Docker - -The default container runtime in minikube is Docker. You can select it explicitly by using: - -```shell -minikube start --container-runtime=docker -``` - -### CRI-O - -To use [CRI-O](https://github.com/kubernetes-sigs/cri-o): - -```shell -minikube start --container-runtime=cri-o -``` - -## containerd - -To use [containerd](https://github.com/containerd/containerd): - -```shell -minikube start --container-runtime=containerd -``` - -## gvisor - -To use [gvisor](https://gvisor.dev): - -```shell -minikube start --container-runtime=containerd -minikube addons enable gvisor -``` - -## Kata - -Native support for [Kata containers](https://katacontainers.io) is a work-in-progress. See [#4347](https://github.com/kubernetes/minikube/issues/4347) for details. - -In the mean time, it's possible to make Kata containers work within minikube using a bit of [elbow grease](https://gist.github.com/olberger/0413cfb0769dcdc34c83788ced583fa9). diff --git a/site/content/en/docs/Reference/uninstalling_minikube.md b/site/content/en/docs/Reference/uninstalling_minikube.md deleted file mode 100644 index 7971e5ec98..0000000000 --- a/site/content/en/docs/Reference/uninstalling_minikube.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Uninstall minikube" -linkTitle: "Uninstall minikube" -weight: 6 -date: 2019-08-18 -description: > - Reference on uninstalling minikube from your system completely. ---- - -# Uninstall minikube on Windows -Following are the ways you can install minikube on Windows. Depending on how you installed minikube, please follow the guide appropriately. - -## Chocolatey -If you have installed minikube using Chocolatey package manager, follow the below steps to completely uninstall minikube from your system - -- Open a command prompt with Administrator privileges. -- We need to delete the cluster which was created by minikube - `minikube delete` -- Run, `choco uninstall minikube` to remove the minikube package from your system. 
-- Now, navigate to your User Folder - `C:\Users\YOUR_USER_NAME` (You can also find the path by expanding the environment variable `%USERPROFILE%`) -- In this folder, delete the `.minikube` folder. - -## Windows Installer -If you have downloaded and installed minikube using the Windows Installer provided in our Releases, kindly follow the below steps - -- Open a command prompt with Administrator privileges. -- We need to delete the cluster which was created by minikube - `minikube delete` -- Now, open the Run dialog box (**Win+R**), type in `appwiz.cpl` and hit **Enter** key. -- In there, find an entry for the Minikube installer, right click on it & click on **Uninstall**. -- Follow the onscreen prompts to uninstall minikube from your system. -- Now, navigate to your User Folder - `C:\Users\YOUR_USER_NAME` (You can also find the path by expanding the environment variable `%USERPROFILE%`) -- In this folder, delete the `.minikube` folder. - -## Binary/Direct -If you have downloaded just the binary and are using it to run minikube, please follow the below steps - -- Open a command prompt with Administrator privileges. -- We need to delete the cluster which was created by minikube - `minikube delete` -- Delete the minikube binary. -- Now, navigate to your User Folder - `C:\Users\YOUR_USER_NAME` (You can also find the path by expanding the environment variable `%USERPROFILE%`) -- In this folder, delete the `.minikube` folder. - - -# Uninstall minikube on Linux -## Binary/Direct -If you have installed minikube using the direct download method, follow the below steps to uninstall minikube completely from your system - -- In the shell, type in `minikube delete` to delete the minikube cluster. -- Remove the binary using `rm /usr/local/bin/minikube` -- Remove the directory containing the minikube configuration `rm -rf ~/.minikube` - -## Debian/Ubuntu (Deb) -If you have installed minikube using the (deb) file, follow the below instructions - -- In the shell, type in `minikube delete` to delete the minikube cluster. -- Uninstall the minikube package completely - `sudo dpkg -P minikube` -- Remove the minikube configuration directory - `rm -rf ~/.minikube` - -## Fedora/Red Hat (RPM) -If you have installed minikube using RPM, follow the below steps - -- In the shell, type in `minikube delete` to delete the minikube cluster. -- Uninstall the minikube package - `sudo rpm -e minikube` -- Remove the minikube configuration directory - `rm -rf ~/.minikube` - - -# Uninstall minikube on MacOS -## Binary/Direct -If you have installed minikube using the direct download method, follow the below steps to uninstall minikube completely from your system - -- In the shell, type in `minikube delete` to delete the minikube cluster. -- Remove the binary using `rm /usr/local/bin/minikube` -- Remove the directory containing the minikube configuration `rm -rf ~/.minikube` - - -## Brew -If you have installed minikube using the direct download method, follow the below steps to uninstall minikube completely from your system - -- In the shell, type in `minikube delete` to delete the minikube cluster. 
-- Uninstall the minikube package using `brew uninstall minikube` -- Remove the directory containing the minikube configuration `rm -rf ~/.minikube` diff --git a/site/content/en/docs/Start/_index.md b/site/content/en/docs/Start/_index.md deleted file mode 100644 index 2347037978..0000000000 --- a/site/content/en/docs/Start/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "Getting Started" -linkTitle: "Getting Started" -weight: 2 -description: > - How to install and start minikube. ---- diff --git a/site/content/en/docs/Start/includes/post_install.inc b/site/content/en/docs/Start/includes/post_install.inc deleted file mode 100644 index 5f28d307cf..0000000000 --- a/site/content/en/docs/Start/includes/post_install.inc +++ /dev/null @@ -1,22 +0,0 @@ -### Getting to know Kubernetes - -Once started, you can use any regular Kubernetes command to interact with your minikube cluster. For example, you can see the pod states by running: - -```shell - kubectl get po -A -``` - -### Increasing memory allocation - -minikube only allocates 2GB of RAM by default, which is only enough for trivial deployments. For larger -deployments, increase the memory allocation using the `--memory` flag, or make the setting persistent using: - -```shell -minikube config set memory 4096 -``` - -### Where to go next? - -Visit the [examples](/docs/examples) page to get an idea of what you can do with minikube. - -📣😀 **Please fill out our [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A)** so that we can learn how & why you use minikube, and what improvements we should make. Thank you! 💃🏽🎉 \ No newline at end of file diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md deleted file mode 100644 index b52b1690d8..0000000000 --- a/site/content/en/docs/Start/linux.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: "Linux" -linkTitle: "Linux" -weight: 1 ---- - -## Installation - -{{% tabs %}} -{{% tab "Direct" %}} - -Download and install minikube to /usr/local/bin: - -```shell - curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && sudo install minikube-linux-amd64 /usr/local/bin/minikube -``` -{{% /tab %}} -{{% tab "Debian/Ubuntu (deb)" %}} - -Download and install minikube: - -```shell -curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_{{< latest >}}-0_amd64.deb \ - && sudo dpkg -i minikube_{{< latest >}}-0_amd64.deb - ``` - -{{% /tab %}} - -{{% tab "Fedora/Red Hat (rpm)" %}} - -Download and install minikube: - -```shell -curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< latest >}}-0.x86_64.rpm \ - && sudo rpm -ivh minikube-{{< latest >}}-0.x86_64.rpm - ``` - -{{% /tab %}} -{{% /tabs %}} - -## Hypervisor Setup - -Verify that your system has virtualization support enabled: - -```shell -egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no -``` - -If the above command outputs "no": - -- If you are running within a VM, your hypervisor does not allow nested virtualization. 
You will need to use the *None (bare-metal)* driver -- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled - -{{% tabs %}} - -{{% tab "KVM" %}} -{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} -{{% /tab %}} -{{% tab "VirtualBox" %}} -{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} -{{% /tab %}} -{{% tab "None (bare-metal)" %}} -If you are already running minikube from inside a VM, it is possible to skip the creation of an additional VM layer by using the `none` driver. - -{{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} -{{% /tab %}} -{{% /tabs %}} - -{{% readfile file="/docs/Start/includes/post_install.inc" %}} diff --git a/site/content/en/docs/Start/macos.md b/site/content/en/docs/Start/macos.md deleted file mode 100644 index 3c41e3a9b9..0000000000 --- a/site/content/en/docs/Start/macos.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "macOS" -linkTitle: "macOS" -weight: 2 ---- - -### Prerequisites - -* macOS 10.12 (Sierra) -* A hypervisor such as Hyperkit, Parallels, VirtualBox, or VMware Fusion - -### Installation - -{{% tabs %}} -{{% tab "Brew" %}} - -If the [Brew Package Manager](https://brew.sh/) is installed, use it to download and install minikube: - -```shell -brew install minikube -``` - -{{% /tab %}} -{{% tab "Direct" %}} - -Download and install minikube to /usr/local/bin: - -```shell -curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && sudo install minikube-darwin-amd64 /usr/local/bin/minikube -``` -{{% /tab %}} -{{% /tabs %}} - -### Upgrading minikube - -{{% tabs %}} -{{% tab "Brew" %}} - -If the [Brew Package Manager](https://brew.sh/) is installed, use it to download and upgrade minikube: - -```shell -brew update -brew upgrade minikube -``` - -{{% /tab %}} -{{% /tabs %}} - -## Hypervisor Setup - -{{% tabs %}} -{{% tab "Hyperkit" %}} -{{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}} -{{% /tab %}} -{{% tab "VirtualBox" %}} -{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} -{{% /tab %}} -{{% tab "Parallels" %}} -{{% readfile file="/docs/Reference/Drivers/includes/parallels_usage.inc" %}} -{{% /tab %}} -{{% tab "VMware" %}} -{{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}} -{{% /tab %}} - -{{% /tabs %}} - -{{% readfile file="/docs/Start/includes/post_install.inc" %}} diff --git a/site/content/en/docs/Start/windows.md b/site/content/en/docs/Start/windows.md deleted file mode 100644 index 02ff403f99..0000000000 --- a/site/content/en/docs/Start/windows.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: "Windows" -linkTitle: "Windows" -weight: 3 ---- - -### Prerequisites - -* Windows 8 or above -* A hypervisor, such as Hyper-V or VirtualBox -* Hardware virtualization support must be enabled in BIOS -* 4GB of RAM - -### Installation - -{{% tabs %}} -{{% tab "Direct" %}} -Download and run the [minikube installer](https://storage.googleapis.com/minikube/releases/latest/minikube-installer.exe) -{{% /tab %}} - -{{% tab "Chocolatey" %}} - -If the [Chocolatey Package Manager](https://chocolatey.org/) is installed, use it to install minikube: - -```shell -choco install minikube -``` - -After it has installed, close the current CLI session and reopen it. minikube should have been added to your path automatically. 
-{{% /tab %}} -{{% /tabs %}} - -## Hypervisor Setup - -To check if virtualization is supported, run the following command on your Windows terminal or command prompt. - -```shell -systeminfo -``` -If you see the following output, virtualization is supported: - -```shell -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - -If you see the following output, your system already has a Hypervisor installed and you can skip the next step. - -```shell -Hyper-V Requirements: A hypervisor has been detected. -``` - -{{% tabs %}} -{{% tab "Hyper-V" %}} -{{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}} -{{% /tab %}} -{{% tab "VirtualBox" %}} -{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} -{{% /tab %}} -{{% /tabs %}} - -{{% readfile file="/docs/Start/includes/post_install.inc" %}} diff --git a/site/content/en/docs/Tasks/Registry/_index.md b/site/content/en/docs/Tasks/Registry/_index.md deleted file mode 100644 index cdecd6eff2..0000000000 --- a/site/content/en/docs/Tasks/Registry/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Working with Registries" -linkTitle: "Working with Registries" -weight: 6 -date: 2017-01-05 -description: > - How to interact with Docker registries. ---- \ No newline at end of file diff --git a/site/content/en/docs/Tasks/Registry/private.md b/site/content/en/docs/Tasks/Registry/private.md deleted file mode 100644 index 270ad665e4..0000000000 --- a/site/content/en/docs/Tasks/Registry/private.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Private" -linkTitle: "Private" -weight: 6 -date: 2020-01-14 -description: > - How to use a private registry within minikube ---- - - -**GCR/ECR/ACR/Docker**: minikube has an addon, `registry-creds` which maps credentials into minikube to support pulling from Google Container Registry (GCR), Amazon's EC2 Container Registry (ECR), Azure Container Registry (ACR), and Private Docker registries. You will need to run `minikube addons configure registry-creds` and `minikube addons enable registry-creds` to get up and running. An example of this is below: - -```shell -$ minikube addons configure registry-creds -Do you want to enable AWS Elastic Container Registry? [y/n]: n - -Do you want to enable Google Container Registry? [y/n]: y --- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):/home/user/.config/gcloud/application_default_credentials.json - -Do you want to enable Docker Registry? [y/n]: n - -Do you want to enable Azure Container Registry? [y/n]: n -registry-creds was successfully configured -$ minikube addons enable registry-creds -``` - -For additional information on private container registries, see [this page](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). - -We recommend you use _ImagePullSecrets_, but if you would like to configure access on the minikube VM you can place the `.dockercfg` in the `/home/docker` directory or the `config.json` in the `/var/lib/kubelet` directory. Make sure to restart your kubelet (for kubeadm) process with `sudo systemctl restart kubelet`. 
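A minimal sketch of the recommended _ImagePullSecrets_ approach, assuming a hypothetical registry `registry.example.com` and a secret named `regcred` (substitute your own registry, credentials, and namespace):

```shell
# Create a docker-registry secret holding the registry credentials
kubectl create secret docker-registry regcred \
  --docker-server=registry.example.com \
  --docker-username=<your-user> \
  --docker-password=<your-password> \
  --docker-email=<your-email>

# Attach it to the default service account so pods in this namespace can pull
kubectl patch serviceaccount default \
  -p '{"imagePullSecrets": [{"name": "regcred"}]}'
```

Pods can also reference the secret directly via `spec.imagePullSecrets` instead of patching the service account.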
- diff --git a/site/content/en/docs/Tasks/_index.md b/site/content/en/docs/Tasks/_index.md deleted file mode 100755 index b5a2e67ad4..0000000000 --- a/site/content/en/docs/Tasks/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: "Core Tasks" -linkTitle: "Core Tasks" -weight: 4 -date: 2017-01-05 -description: > - What can you do with minikube? ---- - -Each task should give the user - -* The prerequisites for this task, if any (this can be specified at the top of a multi-task page if they're the same for all the page's tasks. "All these tasks assume that you understand....and that you have already...."). -* What this task accomplishes. -* Instructions for the task. If it involves editing a file, running a command, or writing code, provide code-formatted example snippets to show the user what to do! If there are multiple steps, provide them as a numbered list. -* If appropriate, links to related concept, tutorial, or example pages. diff --git a/site/content/en/docs/Tasks/addons.md b/site/content/en/docs/Tasks/addons.md deleted file mode 100644 index 9cecd406bd..0000000000 --- a/site/content/en/docs/Tasks/addons.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "Addons" -date: 2019-07-31 -weight: 4 -description: > - Using addons ---- - -minikube has a set of built-in addons that, when enabled, can be used within Kubernetes. - -## Available addons - -* [Kubernetes Dashboard](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dashboard) -* [EFK](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-elasticsearch) -* [Registry](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry) -* [Registry Credentials](https://github.com/upmc-enterprises/registry-creds) -* [Ingress](https://github.com/kubernetes/ingress-nginx) -* [Freshpod](https://github.com/GoogleCloudPlatform/freshpod) -* [nvidia-driver-installer](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/nvidia-driver-installer/minikube) -* [nvidia-gpu-device-plugin](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu) -* [logviewer](https://github.com/ivans3/minikube-log-viewer) -* [gvisor](../../../gvisor/readme/) -* [storage-provisioner-gluster](../../../storage-provisioner-gluster/readme) -* [helm-tiller](../../../helm-tiller/readme) -* [ingress-dns](../../../ingress-dns/readme) -* [istio](../../../istio/readme) - -## Listing available addons - -```shell -minikube addons list -``` - -Example output: - -``` -- registry: disabled -- registry-creds: disabled -- freshpod: disabled -- addon-manager: enabled -- dashboard: enabled -- efk: disabled -- ingress: disabled -- istio: disabled -- istio-provisioner: enabled -- default-storageclass: enabled -- storage-provisioner: enabled -- storage-provisioner-gluster: disabled -- nvidia-driver-installer: disabled -- nvidia-gpu-device-plugin: disabled -``` - -## Enabling an addon - -```shell -minikube addons enable -``` - -or - -```shell -minikube start --addons -``` - -## Interacting with an addon - -For addons that expose a browser endpoint, use: - -```shell -minikube addons open -``` - -## Disabling an addon - -```shell -minikube addons disable -``` - -## Custom Addons - -If you would like to have minikube properly start/restart custom addons, place the addon(s) _.yaml_ you wish to be launched with minikube in the `.minikube/addons` directory. Addons in this folder will be moved to the minikube VM and launched each time minikube is started/restarted. 
Learn [how to develop minikube addons]({{< ref "/docs/contributing/addons.en.md" >}}). diff --git a/site/content/en/docs/Tasks/building.md b/site/content/en/docs/Tasks/building.md deleted file mode 100644 index 0df3d6484c..0000000000 --- a/site/content/en/docs/Tasks/building.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Building images within minikube" -date: 2019-08-05 -weight: 1 -description: > - Building images within minikube ---- - -When using a single VM of Kubernetes it's really handy to build inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same machine as minikube which speeds up local experiments. - -## Docker (containerd) - -For Docker, you can either set up your host docker client to communicate by [reusing the docker daemon]({{< ref "/docs/tasks/docker_daemon.md" >}}). - -Or you can use `minikube ssh` to connect to the virtual machine, and run the `docker build` there: - -```shell -docker build -``` - -For more information on the `docker build` command, read the [Docker documentation](https://docs.docker.com/engine/reference/commandline/build/) (docker.com). - -## Podman (cri-o) - -For Podman, you can either set up your host `podman-remote` client to communicate with Podman service within minikube, by [reusing the Podman service]({{< ref "/docs/tasks/podman_service" >}}). - -Or you can use `minikube ssh`; you will also make sure to run the command as the root user: - -```shell -sudo -E podman build -``` - -For more information on the `podman build` command, read the [Podman documentation](https://github.com/containers/libpod/blob/master/docs/source/markdown/podman-build.1.md) (podman.io). - -## Build context - -For the build context you can use any directory on the virtual machine, or any address on the network. diff --git a/site/content/en/docs/Tasks/caching.md b/site/content/en/docs/Tasks/caching.md deleted file mode 100644 index 51b9d6d67b..0000000000 --- a/site/content/en/docs/Tasks/caching.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Caching images" -date: 2019-08-05 -weight: 1 -description: > - How to cache arbitrary Docker images ---- - -## Overview - -For offline use and performance reasons, minikube caches required Docker images onto the local file system. Developers may find it useful to add their own images to this cache for local development. - -## Adding an image - -To add the ubuntu 16.04 image to minikube's image cache: - -```shell -minikube cache add ubuntu:16.04 -``` - -The add command will store the requested image to `$MINIKUBE_HOME/cache/images`, and load it into the VM's container runtime environment next time `minikube start` is called. - -## Listing images - -To display images you have added to the cache: - -```shell -minikube cache list -``` - -This listing will not include the images which are built-in to minikube. 
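As an end-to-end sketch (the image name here is arbitrary), caching an image and then deploying it with a non-`Always` pull policy looks like this:

```shell
# Cache the image on the host; it is loaded into the cluster on the next start
minikube cache add nginx:1.17

# Verify it shows up in the cache listing
minikube cache list

# Use the cached image; IfNotPresent avoids pulling from a remote registry
kubectl run cached-nginx --image=nginx:1.17 --image-pull-policy=IfNotPresent
```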
- -## Deleting an image - -```shell -minikube cache delete -``` - -### Additional Information - -* [Reference: Disk Cache]({{< ref "/docs/reference/disk_cache.md" >}}) -* [Reference: cache command]({{< ref "/docs/reference/commands/cache.md" >}}) \ No newline at end of file diff --git a/site/content/en/docs/Tasks/docker_daemon.md b/site/content/en/docs/Tasks/docker_daemon.md deleted file mode 100644 index 2402099b53..0000000000 --- a/site/content/en/docs/Tasks/docker_daemon.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Using the Docker daemon" -linkTitle: "Using the Docker daemon" -weight: 6 -date: 2018-08-02 -description: > - How to access the Docker daemon within minikube ---- - -## Prerequisites - -You must be using minikube with the container runtime set to Docker. This is the default setting. - -## Method 1: Without minikube registry addon - -When using a single VM of Kubernetes it's really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments. - -To be able to work with the docker daemon on your mac/linux host use the docker-env command in your shell: - -```shell -eval $(minikube docker-env) -``` - -You should now be able to use docker on the command line on your host mac/linux machine talking to the docker daemon inside the minikube VM: - -```shell -docker ps -``` - -Remember to turn off the _imagePullPolicy:Always_, as otherwise Kubernetes won't use images you built locally. - -### Possible errors and solutions - -Docker may report following forbidden error if you are using http proxy and the `$(minikube ip)` is not added to `no_proxy`/`NO_PROXY`: - -``` -error during connect: Get https://192.168.39.98:2376/v1.39/containers/json: Forbidden -``` - -On Centos 7, docker may report the following error: - -``` -Could not read CA certificate "/etc/docker/ca.pem": open /etc/docker/ca.pem: no such file or directory -``` - -The fix is to update ``/etc/sysconfig/docker`` to ensure that minikube's environment changes are respected: - -```diff -< DOCKER_CERT_PATH=/etc/docker ---- -> if [ -z "${DOCKER_CERT_PATH}" ]; then -> DOCKER_CERT_PATH=/etc/docker -> fi -``` - -When you're using a docker installed via `snap` on a distribution like Ubuntu that uses AppArmor profiles the following error may appear: - -``` -could not read CA certificate "/home/USERNAME/.minikube/certs/ca.pem": open /home/USERNAME/.minikube/certs/ca.pem: permission denied -``` - -The solution is to allow docker to read the minikube certificates by adding a line in ``/var/lib/snapd/apparmor/profiles/snap.docker.docker`` file: - -```shell -# allow docker to read minikube certificates -owner @{HOME}/.minikube/certs/* r, -``` - -After that check for syntax errors and try again: - -```shell -sudo apparmor_parser -r /var/lib/snapd/apparmor/profiles/snap.docker.docker -eval $(minikube docker-env) -docker ps -``` - -## Related Documentation - -- [Using the Docker registry]({{< ref "/docs/tasks/docker_registry" >}}) diff --git a/site/content/en/docs/Tasks/docker_registry.md b/site/content/en/docs/Tasks/docker_registry.md deleted file mode 100644 index 64afca1c00..0000000000 --- a/site/content/en/docs/Tasks/docker_registry.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Using the Docker registry" -linkTitle: "Using the Docker registry" -weight: 6 -date: 2018-08-02 -description: > - How to access the Docker registry within minikube ---- - -As an 
alternative to [reusing the Docker daemon]({{< ref "/docs/tasks/docker_daemon.md" >}}), you may enable the registry addon to push images directly into registry. - -Steps are as follows: - -For illustration purpose, we will assume that minikube VM has one of the ip from `192.168.39.0/24` subnet. If you have not overridden these subnets as per [networking guide](https://minikube.sigs.k8s.io/docs/reference/networking/), you can find out default subnet being used by minikube for a specific OS and driver combination [here](https://github.com/kubernetes/minikube/blob/dfd9b6b83d0ca2eeab55588a16032688bc26c348/pkg/minikube/cluster/cluster.go#L408) which is subject to change. Replace `192.168.39.0/24` with appropriate values for your environment wherever applicable. - -Ensure that docker is configured to use `192.168.39.0/24` as insecure registry. Refer [here](https://docs.docker.com/registry/insecure/) for instructions. - -Ensure that `192.168.39.0/24` is enabled as insecure registry in minikube. Refer [here](https://minikube.sigs.k8s.io/docs/tasks/registry/insecure/) for instructions.. - -Enable minikube registry addon: - -```shell -minikube addons enable registry -``` - -Build docker image and tag it appropriately: - -```shell -docker build --tag $(minikube ip):5000/test-img . -``` - -Push docker image to minikube registry: - -```shell -docker push $(minikube ip):5000/test-img -``` - -Now run it in minikube: - -```shell -kubectl run test-img --image=$(minikube ip):5000/test-img -``` - -Or if `192.168.39.0/24` is not enabled as insecure registry in minikube, then: - -```shell -kubectl run test-img --image=localhost:5000/test-img -``` diff --git a/site/content/en/docs/Tasks/nodeport.md b/site/content/en/docs/Tasks/nodeport.md deleted file mode 100644 index 5e73db4cad..0000000000 --- a/site/content/en/docs/Tasks/nodeport.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "NodePort access" -linkTitle: "NodePort access" -weight: 6 -date: 2018-08-02 -description: > - How to access a NodePort service in minikube ---- - -A NodePort service is the most basic way to get external traffic directly to your service. NodePort, as the name implies, opens a specific port, and any traffic that is sent to this port is forwarded to the service. - -### Getting the NodePort using the service command - -We also have a shortcut for fetching the minikube IP and a service's `NodePort`: - -`minikube service --url $SERVICE` - -## Getting the NodePort using kubectl - -The minikube VM is exposed to the host system via a host-only IP address, that can be obtained with the `minikube ip` command. Any services of type `NodePort` can be accessed over that IP address, on the NodePort. - -To determine the NodePort for your service, you can use a `kubectl` command like this (note that `nodePort` begins with lowercase `n` in JSON output): - -`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'` - -### Increasing the NodePort range - -By default, minikube only exposes ports 30000-32767. If this does not work for you, you can adjust the range by using: - -`minikube start --extra-config=apiserver.service-node-port-range=1-65535` - -This flag also accepts a comma separated list of ports and port ranges. 
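To tie the earlier sections together, a hedged end-to-end example (assuming a `NodePort` service named `hello-minikube` already exists) would be:

```shell
# Look up the assigned NodePort for the service
NODE_PORT=$(kubectl get service hello-minikube --output='jsonpath={.spec.ports[0].nodePort}')

# Combine it with the cluster IP to reach the service from the host
curl "http://$(minikube ip):${NODE_PORT}"
```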
- diff --git a/site/content/en/docs/Tasks/podman_service.md b/site/content/en/docs/Tasks/podman_service.md deleted file mode 100644 index 778388ac60..0000000000 --- a/site/content/en/docs/Tasks/podman_service.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "Using the Podman service" -linkTitle: "Using the Podman service" -weight: 6 -date: 2020-01-20 -description: > - How to access the Podman service within minikube ---- - -## Prerequisites - -You should be using minikube with the container runtime set to CRI-O. It uses the same storage as Podman. - -## Method 1: Without minikube registry addon - -When using a single VM of Kubernetes it's really handy to reuse the Podman service inside the VM; as this means you don't have to build on your host machine and push the image into a container registry - you can just build inside the same container storage as minikube which speeds up local experiments. - -To be able to work with the podman client on your mac/linux host use the podman-env command in your shell: - -```shell -eval $(minikube podman-env) -``` - -You should now be able to use podman on the command line on your host mac/linux machine talking to the podman service inside the minikube VM: - -```shell -podman-remote help -``` - -Remember to turn off the `imagePullPolicy:Always` (use `imagePullPolicy:IfNotPresent` or `imagePullPolicy:Never`), as otherwise Kubernetes won't use images you built locally. - -### Example - -```shell -$ cat Containerfile -FROM busybox -CMD exec /bin/sh -c "trap : TERM INT; (while true; do sleep 1000; done) & wait" -$ eval $(minikube podman-env) -$ podman-remote build -t example.com/test:v1 . -STEP 1: FROM busybox -STEP 2: CMD exec /bin/sh -c "trap : TERM INT; (while true; do sleep 1000; done) & wait" -STEP 3: COMMIT example.com/test:v1 -2881381f7b9675ea5a0e635605bc0c4c08857582990bcadf0685b9f8976de2d3 -$ minikube ssh -- sudo crictl images example.com/test:v1 -IMAGE TAG IMAGE ID SIZE -example.com/test v1 2881381f7b967 1.44MB -$ kubectl run test --image example.com/test:v1 --image-pull-policy=IfNotPresent -kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead. -deployment.apps/test created -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -test-d98bdbfdd-lwnqz 1/1 Running 0 18s - -``` - -## Related Documentation - -- [Using the Docker registry]({{< ref "/docs/tasks/docker_registry" >}}) diff --git a/site/content/en/docs/Tutorials/audit-policy.md b/site/content/en/docs/Tutorials/audit-policy.md deleted file mode 100644 index 56cf6428d2..0000000000 --- a/site/content/en/docs/Tutorials/audit-policy.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "Audit Policy" -linkTitle: "Audit Policy" -weight: 1 -date: 2019-11-19 -description: > - Enabling audit policy for minikube ---- - -## Overview - -[Auditing](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) is not enabled in minikube by default. -This tutorial shows how to provide an [Audit Policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) file to the minikube API server on startup. - -## Tutorial - -```shell -minikube stop - -mkdir -p ~/.minikube/files/etc/ssl/certs - -cat < ~/.minikube/files/etc/ssl/certs/audit-policy.yaml -# Log all requests at the Metadata level. 
-apiVersion: audit.k8s.io/v1 -kind: Policy -rules: -- level: Metadata -EOF - -minikube start \ - --extra-config=apiserver.audit-policy-file=/etc/ssl/certs/audit-policy.yaml \ - --extra-config=apiserver.audit-log-path=- - -kubectl logs kube-apiserver-minikube -n kube-system | grep audit.k8s.io/v1 -``` - -The [Audit Policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) used in this tutorial is very minimal and quite verbose. As a next step you might want to finetune the `audit-policy.yaml` file. To get the changes applied you need to stop and start minikube. Restarting minikube triggers the [file sync mechanism](https://minikube.sigs.k8s.io/docs/tasks/sync/) that copies the yaml file onto the minikube node and causes the API server to read the changed policy file. - -Note: Currently there is no dedicated directory to store the `audit-policy.yaml` file in `~/.minikube/`. Using the `~/.minikube/files/etc/ssl/certs` directory is a workaround! This workaround works like this: By putting the file into a sub-directory of `~/.minikube/files/`, the [file sync mechanism](https://minikube.sigs.k8s.io/docs/tasks/sync/) gets triggered and copies the `audit-policy.yaml` file from the host onto the minikube node. When the API server container gets started by `kubeadm` I'll mount the `/etc/ssl/certs` directory from the minikube node into the container. This is the reason why the `audit-policy.yaml` file has to be stored in the ssl certs directory: It's one of the directories that get mounted from the minikube node into the container. diff --git a/site/content/en/docs/Tutorials/untrusted_root_certificate.md b/site/content/en/docs/Tutorials/untrusted_root_certificate.md deleted file mode 100644 index 77093d76f3..0000000000 --- a/site/content/en/docs/Tutorials/untrusted_root_certificate.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Untrusted Root Certificate" -linkTitle: "Untrusted Root Certificate" -weight: 1 -date: 2019-08-15 -description: > - Using minikube with Untrusted Root Certificate ---- - -## Overview - -Most organizations deploy their own Root Certificate and CA service inside the corporate networks. -Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns. -You may install the Root Certificate into the minikube VM to access these corporate resources within the cluster. - -## Prerequisites - -- Corporate X.509 Root Certificate -- Latest minikube binary and ISO - -## Tutorial - -* The certificate must be in PEM format. You may use `openssl` to convert from DER format. - -``` -openssl x509 -inform der -in my_company.cer -out my_company.pem -``` - -* You may need to delete existing minikube VM - -```shell -minikube delete -``` - -* Copy the certificate before creating the minikube VM - -```shell -mkdir -p $HOME/.minikube/certs -cp my_company.pem $HOME/.minikube/certs/my_company.pem - -minikube start -``` diff --git a/site/content/en/docs/_index.md b/site/content/en/docs/_index.md index f285e090f8..1a552191eb 100755 --- a/site/content/en/docs/_index.md +++ b/site/content/en/docs/_index.md @@ -1,11 +1,26 @@ - --- -title: "Documentation" +title: "Welcome!" linkTitle: "Documentation" -weight: 20 -menu: - main: - weight: 20 +no_list: true +weight: 1 +aliases: + - /docs/overview --- -This section is where the user documentation for minikube lives - all the information that users need to understand and successfully use minikube. 
+minikube quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows. We proudly focus on helping application developers and new Kubernetes users. + +![Screenshot](/images/screenshot.png) + +## Highlights + +* Supports the latest Kubernetes release (+6 previous minor versions) +* Cross-platform (Linux, macOS, Windows) +* Deploy as a VM, a container, or on bare-metal +* Multiple container runtimes (CRI-O, containerd, docker) +* Docker API endpoint for blazing fast [image pushes](https://minikube.sigs.k8s.io/docs/handbook/pushing/#pushing-directly-to-the-in-cluster-docker-daemon) +* Advanced features such as [LoadBalancer](https://minikube.sigs.k8s.io/Handbook/loadbalancer/), filesystem mounts, and FeatureGates +* [Addons](https://minikube.sigs.k8s.io/Handbook/addons/) for easily installed Kubernetes applications + +## Survey + +We have a [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A) to learn how & why you are using minikube, and what improvements we should make. We would love to hear from you! 🙏 diff --git a/site/content/en/docs/Reference/Commands/_index.md b/site/content/en/docs/commands/_index.md similarity index 91% rename from site/content/en/docs/Reference/Commands/_index.md rename to site/content/en/docs/commands/_index.md index 889734bd1f..2ecd64a746 100644 --- a/site/content/en/docs/Reference/Commands/_index.md +++ b/site/content/en/docs/commands/_index.md @@ -1,7 +1,7 @@ --- title: "Commands" linkTitle: "Commands" -weight: 1 +weight: 5 date: 2019-08-01 description: > minikube command reference diff --git a/site/content/en/docs/commands/addons.md b/site/content/en/docs/commands/addons.md new file mode 100644 index 0000000000..ce54b0967e --- /dev/null +++ b/site/content/en/docs/commands/addons.md @@ -0,0 +1,239 @@ +--- +title: "addons" +description: > + Modify minikube's kubernetes addons +--- + + + +## minikube addons + +Modify minikube's kubernetes addons + +### Synopsis + +addons modifies minikube addons files using subcommands like "minikube addons enable dashboard" + +``` +minikube addons SUBCOMMAND [flags] +``` + +### Options + +``` + -h, --help help for addons +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons configure + +Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list + +### Synopsis + +Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). 
For a list of available addons use: minikube addons list + +``` +minikube addons configure ADDON_NAME [flags] +``` + +### Options + +``` + -h, --help help for configure +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons disable + +Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). For a list of available addons use: minikube addons list + +### Synopsis + +Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). For a list of available addons use: minikube addons list + +``` +minikube addons disable ADDON_NAME [flags] +``` + +### Options + +``` + -h, --help help for disable +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons enable + +Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list + +### Synopsis + +Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list + +``` +minikube addons enable ADDON_NAME [flags] +``` + +### Options + +``` + -h, --help help for enable +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type addons help [path to command] for full details. + +``` +minikube addons help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons list + +Lists all available minikube addons as well as their current statuses (enabled/disabled) + +### Synopsis + +Lists all available minikube addons as well as their current statuses (enabled/disabled) + +``` +minikube addons list [flags] +``` + +### Options + +``` + -h, --help help for list + -o, --output string minikube addons list --output OUTPUT. json, list (default "list") +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube addons open + +Opens the addon w/ADDON_NAME within minikube (example: minikube addons open dashboard). For a list of available addons use: minikube addons list + +### Synopsis + +Opens the addon w/ADDON_NAME within minikube (example: minikube addons open dashboard). For a list of available addons use: minikube addons list + +``` +minikube addons open ADDON_NAME [flags] +``` + +### Options + +``` + --format string Format to output addons URL in. This format will be applied to each url individually and they will be printed one at a time. 
(default "http://{{.IP}}:{{.Port}}") + -h, --help help for open + --https Open the addons URL with https instead of http + --interval int The time interval for each check that wait performs in seconds (default 1) + --url Display the kubernetes addons URL in the CLI instead of opening it in the default browser + --wait int Amount of time to wait for service in seconds (default 2) +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/commands/cache.md b/site/content/en/docs/commands/cache.md new file mode 100644 index 0000000000..d3e3cd1b9f --- /dev/null +++ b/site/content/en/docs/commands/cache.md @@ -0,0 +1,199 @@ +--- +title: "cache" +description: > + Add or delete an image from the local cache. +--- + + + +## minikube cache + +Add or delete an image from the local cache. + +### Synopsis + +Add or delete an image from the local cache. + +### Options + +``` + -h, --help help for cache +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube cache add + +Add an image to local cache. + +### Synopsis + +Add an image to local cache. + +``` +minikube cache add [flags] +``` + +### Options + +``` + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube cache delete + +Delete an image from the local cache. + +### Synopsis + +Delete an image from the local cache. + +``` +minikube cache delete [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube cache help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type cache help [path to command] for full details. + +``` +minikube cache help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube cache list + +List all available images from the local cache. + +### Synopsis + +List all available images from the local cache. + +``` +minikube cache list [flags] +``` + +### Options + +``` + --format string Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ + For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate (default "{{.CacheImage}}\n") + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. 
This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube cache reload + +reload cached images. + +### Synopsis + +reloads images previously added using the 'cache add' subcommand + +``` +minikube cache reload [flags] +``` + +### Options + +``` + -h, --help help for reload +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/completion.md b/site/content/en/docs/commands/completion.md similarity index 58% rename from site/content/en/docs/Reference/Commands/completion.md rename to site/content/en/docs/commands/completion.md index 11fa3a822f..38bec17318 100644 --- a/site/content/en/docs/Reference/Commands/completion.md +++ b/site/content/en/docs/commands/completion.md @@ -1,46 +1,48 @@ --- title: "completion" -linkTitle: "completion" -weight: 1 -date: 2019-08-01 description: > Outputs minikube shell completion for the given shell (bash or zsh) --- -### Overview + +## minikube completion Outputs minikube shell completion for the given shell (bash or zsh) -This depends on the bash-completion binary. Example installation instructions: +### Synopsis + + + Outputs minikube shell completion for the given shell (bash or zsh) + + This depends on the bash-completion binary. 
Example installation instructions: + OS X: + $ brew install bash-completion + $ source $(brew --prefix)/etc/bash_completion + $ minikube completion bash > ~/.minikube-completion # for bash users + $ minikube completion zsh > ~/.minikube-completion # for zsh users + $ source ~/.minikube-completion + Ubuntu: + $ apt-get install bash-completion + $ source /etc/bash-completion + $ source <(minikube completion bash) # for bash users + $ source <(minikube completion zsh) # for zsh users + + Additionally, you may want to output the completion to a file and source in your .bashrc + + Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2 -### Usage ``` minikube completion SHELL [flags] ``` -## Example: macOS +### Options -```shell -brew install bash-completion -source $(brew --prefix)/etc/bash_completion -minikube completion bash > ~/.minikube-completion # for bash users -$ minikube completion zsh > ~/.minikube-completion # for zsh users -$ source ~/.minikube-completion ``` - -## Example: Ubuntu - -```shell -apt-get install bash-completion -source /etc/bash-completion -source <(minikube completion bash) # for bash users -source <(minikube completion zsh) # for zsh users + -h, --help help for completion ``` -Additionally, you may want to output the completion to a file and source in your .bashrc - ### Options inherited from parent commands ``` @@ -54,3 +56,4 @@ Additionally, you may want to output the completion to a file and source in your -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/commands/config.md b/site/content/en/docs/commands/config.md new file mode 100644 index 0000000000..2775a6e4ae --- /dev/null +++ b/site/content/en/docs/commands/config.md @@ -0,0 +1,234 @@ +--- +title: "config" +description: > + Modify minikube config +--- + + + +## minikube config + +Modify minikube config + +### Synopsis + +config modifies minikube config files using subcommands like "minikube config set driver kvm" +Configurable fields: + + * driver + * vm-driver + * container-runtime + * feature-gates + * v + * cpus + * disk-size + * host-only-cidr + * memory + * log_dir + * kubernetes-version + * iso-url + * WantUpdateNotification + * ReminderWaitPeriodInHours + * WantReportError + * WantReportErrorPrompt + * WantKubectlDownloadMsg + * WantNoneDriverWarning + * profile + * bootstrapper + * ShowDriverDeprecationNotification + * ShowBootstrapperDeprecationNotification + * insecure-registry + * hyperv-virtual-switch + * disable-driver-mounts + * cache + * embed-certs + * native-ssh + +``` +minikube config SUBCOMMAND [flags] +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube config get + +Gets the value of PROPERTY_NAME from the minikube config file + +### Synopsis + +Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables. + +``` +minikube config get PROPERTY_NAME [flags] +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube config help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type config help [path to command] for full details. + +``` +minikube config help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube config set + +Sets an individual value in a minikube config file + +### Synopsis + +Sets the PROPERTY_NAME config value to PROPERTY_VALUE + These values can be overwritten by flags or environment variables at runtime. + +``` +minikube config set PROPERTY_NAME PROPERTY_VALUE [flags] +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube config unset + +unsets an individual value in a minikube config file + +### Synopsis + +unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables + +``` +minikube config unset PROPERTY_NAME [flags] +``` + +### Options + +``` + -h, --help help for unset +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube config view + +Display values currently set in the minikube config file + +### Synopsis + +Display values currently set in the minikube config file. + +``` +minikube config view [flags] +``` + +### Options + +``` + --format string Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ + For the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate (default "- {{.ConfigKey}}: {{.ConfigValue}}\n") + -h, --help help for view +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/dashboard.md b/site/content/en/docs/commands/dashboard.md similarity index 79% rename from site/content/en/docs/Reference/Commands/dashboard.md rename to site/content/en/docs/commands/dashboard.md index 8bd21037d3..e2aafc4e08 100644 --- a/site/content/en/docs/Reference/Commands/dashboard.md +++ b/site/content/en/docs/commands/dashboard.md @@ -1,26 +1,31 @@ --- title: "dashboard" -linkTitle: "dashboard" -weight: 1 -date: 2019-08-01 description: > - Access the Kubernetes dashboard running within the minikube cluster + Access the kubernetes dashboard running within the minikube cluster --- -## Usage + + +## minikube dashboard + +Access the kubernetes dashboard running within the minikube cluster + +### Synopsis + +Access the kubernetes dashboard running within the minikube cluster ``` minikube dashboard [flags] ``` -## Options +### Options ``` -h, --help help for dashboard --url Display dashboard URL instead of opening a browser ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` --alsologtostderr log to standard error as well as files @@ -33,3 +38,4 @@ minikube dashboard [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/logs.md b/site/content/en/docs/commands/delete.md similarity index 68% rename from site/content/en/docs/Reference/Commands/logs.md rename to site/content/en/docs/commands/delete.md index 595c7055e1..f343bf524f 100644 --- a/site/content/en/docs/Reference/Commands/logs.md +++ b/site/content/en/docs/commands/delete.md @@ -1,25 +1,30 @@ --- -title: "logs" -linkTitle: "logs" -weight: 1 -date: 2019-08-01 +title: "delete" description: > - Gets the logs of the running instance, used for debugging minikube, not user code + Deletes a local kubernetes cluster --- -## Usage + + +## minikube delete + +Deletes a local kubernetes cluster + +### Synopsis + +Deletes a local kubernetes cluster. This command deletes the VM, and removes all +associated files. ``` -minikube logs [flags] +minikube delete [flags] ``` ### Options ``` - -f, --follow Show only the most recent journal entries, and continuously print new entries as they are appended to the journal. - -h, --help help for logs - -n, --length int Number of lines back to go within the log (default 60) - --problems Show only log entries which point to known problems + --all Set flag to delete all profiles + -h, --help help for delete + --purge Set this flag to delete the '.minikube' folder from your user directory. 
``` ### Options inherited from parent commands @@ -35,3 +40,4 @@ minikube logs [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/docker-env.md b/site/content/en/docs/commands/docker-env.md similarity index 89% rename from site/content/en/docs/Reference/Commands/docker-env.md rename to site/content/en/docs/commands/docker-env.md index 2260564680..188051a6e1 100644 --- a/site/content/en/docs/Reference/Commands/docker-env.md +++ b/site/content/en/docs/commands/docker-env.md @@ -1,13 +1,18 @@ --- title: "docker-env" -linkTitle: "docker-env" -weight: 1 -date: 2019-08-01 description: > Sets up docker env variables; similar to '$(docker-machine env)' --- -### Usage + + +## minikube docker-env + +Sets up docker env variables; similar to '$(docker-machine env)' + +### Synopsis + +Sets up docker env variables; similar to '$(docker-machine env)'. ``` minikube docker-env [flags] @@ -35,3 +40,4 @@ minikube docker-env [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/delete.md b/site/content/en/docs/commands/help.md similarity index 56% rename from site/content/en/docs/Reference/Commands/delete.md rename to site/content/en/docs/commands/help.md index eabf82953a..717ec0445e 100644 --- a/site/content/en/docs/Reference/Commands/delete.md +++ b/site/content/en/docs/commands/help.md @@ -1,45 +1,28 @@ --- -title: "delete" -linkTitle: "delete" -weight: 1 -date: 2019-08-01 +title: "help" description: > - Deletes a local Kubernetes cluster + Help about any command --- -### Overview -Deletes a local Kubernetes cluster. This command deletes the VM, and removes all -associated files. -## Usage +## minikube help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type minikube help [path to command] for full details. ``` -minikube delete [flags] +minikube help [command] [flags] ``` -##### Delete all profiles -``` -minikube delete --all -``` - -##### Delete profile & `.minikube` directory -Do note that the following command only works if you have only 1 profile. If there are multiple profiles, the command will error out. -``` -minikube delete --purge -``` - -##### Delete all profiles & `.minikube` directory -This will delete all the profiles and `.minikube` directory. -``` -minikube delete --purge --all -``` - -### Flags +### Options ``` - --all: Set flag to delete all profiles - --purge: Set this flag to delete the '.minikube' folder from your user directory. 
+ -h, --help help for help ``` ### Options inherited from parent commands @@ -55,3 +38,4 @@ minikube delete --purge --all -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/ip.md b/site/content/en/docs/commands/ip.md similarity index 90% rename from site/content/en/docs/Reference/Commands/ip.md rename to site/content/en/docs/commands/ip.md index 020fe0ed65..188312dad3 100644 --- a/site/content/en/docs/Reference/Commands/ip.md +++ b/site/content/en/docs/commands/ip.md @@ -1,22 +1,29 @@ --- title: "ip" -linkTitle: "ip" -weight: 1 -date: 2019-08-01 description: > Retrieves the IP address of the running cluster --- -### Overview + + +## minikube ip + +Retrieves the IP address of the running cluster + +### Synopsis Retrieves the IP address of the running cluster, and writes it to STDOUT. -### Usage - ``` minikube ip [flags] ``` +### Options + +``` + -h, --help help for ip +``` + ### Options inherited from parent commands ``` @@ -30,3 +37,4 @@ minikube ip [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/kubectl.md b/site/content/en/docs/commands/kubectl.md similarity index 86% rename from site/content/en/docs/Reference/Commands/kubectl.md rename to site/content/en/docs/commands/kubectl.md index cfca7ec2a4..6699f257a4 100644 --- a/site/content/en/docs/Reference/Commands/kubectl.md +++ b/site/content/en/docs/commands/kubectl.md @@ -1,32 +1,33 @@ --- title: "kubectl" -linkTitle: "kubectl" -weight: 1 -date: 2019-08-01 description: > Run kubectl --- -### Overview -Run the Kubernetes client, download it if necessary. -Remember `--` after kubectl! +## minikube kubectl -### Usage +Run kubectl + +### Synopsis + +Run the kubernetes client, download it if necessary. Remember -- after kubectl! + +Examples: +minikube kubectl -- --help +minikube kubectl -- get pods --namespace kube-system ``` minikube kubectl [flags] ``` -### Examples: +### Options ``` -minikube kubectl -- --help -minikube kubectl -- get pods --namespace kube-system + -h, --help help for kubectl ``` - ### Options inherited from parent commands ``` @@ -40,3 +41,4 @@ minikube kubectl -- get pods --namespace kube-system -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/commands/logs.md b/site/content/en/docs/commands/logs.md new file mode 100644 index 0000000000..097a87b3c4 --- /dev/null +++ b/site/content/en/docs/commands/logs.md @@ -0,0 +1,44 @@ +--- +title: "logs" +description: > + Gets the logs of the running instance, used for debugging minikube, not user code. +--- + + + +## minikube logs + +Gets the logs of the running instance, used for debugging minikube, not user code. + +### Synopsis + +Gets the logs of the running instance, used for debugging minikube, not user code. + +``` +minikube logs [flags] +``` + +### Options + +``` + -f, --follow Show only the most recent journal entries, and continuously print new entries as they are appended to the journal. + -h, --help help for logs + -n, --length int Number of lines back to go within the log (default 60) + --node string The node to get logs from. Defaults to the primary control plane. 
+ --problems Show only log entries which point to known problems +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/mount.md b/site/content/en/docs/commands/mount.md similarity index 93% rename from site/content/en/docs/Reference/Commands/mount.md rename to site/content/en/docs/commands/mount.md index fa85b70922..afc671e778 100644 --- a/site/content/en/docs/Reference/Commands/mount.md +++ b/site/content/en/docs/commands/mount.md @@ -1,13 +1,18 @@ --- title: "mount" -linkTitle: "mount" -weight: 1 -date: 2019-08-01 description: > Mounts the specified directory into minikube --- -### Usage + + +## minikube mount + +Mounts the specified directory into minikube + +### Synopsis + +Mounts the specified directory into minikube. ``` minikube mount [flags] : @@ -41,3 +46,4 @@ minikube mount [flags] : -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/commands/node.md b/site/content/en/docs/commands/node.md new file mode 100644 index 0000000000..9aec995f71 --- /dev/null +++ b/site/content/en/docs/commands/node.md @@ -0,0 +1,207 @@ +--- +title: "node" +description: > + Node operations +--- + + + +## minikube node + +Node operations + +### Synopsis + +Operations on nodes + +``` +minikube node [flags] +``` + +### Options + +``` + -h, --help help for node +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube node add + +Adds a node to the given cluster. + +### Synopsis + +Adds a node to the given cluster config, and starts it. + +``` +minikube node add [flags] +``` + +### Options + +``` + --control-plane If true, the node added will also be a control plane in addition to a worker. + --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. + -h, --help help for add + --worker If true, the added node will be marked for work. 
Defaults to true. (default true) +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube node delete + +Deletes a node from a cluster. + +### Synopsis + +Deletes a node from a cluster. + +``` +minikube node delete [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube node help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type node help [path to command] for full details. + +``` +minikube node help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube node start + +Starts a node. + +### Synopsis + +Starts an existing stopped node in a cluster. + +``` +minikube node start [flags] +``` + +### Options + +``` + --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. 
+ -h, --help help for start + --name string The name of the node to start +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube node stop + +Stops a node in a cluster. + +### Synopsis + +Stops a node in a cluster. + +``` +minikube node stop [flags] +``` + +### Options + +``` + -h, --help help for stop + --name string The name of the node to delete +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/commands/options.md b/site/content/en/docs/commands/options.md new file mode 100644 index 0000000000..9536ffb349 --- /dev/null +++ b/site/content/en/docs/commands/options.md @@ -0,0 +1,40 @@ +--- +title: "options" +description: > + Show a list of global command-line options (applies to all commands). +--- + + + +## minikube options + +Show a list of global command-line options (applies to all commands). + +### Synopsis + +Show a list of global command-line options (applies to all commands). + +``` +minikube options [flags] +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/pause.md b/site/content/en/docs/commands/pause.md similarity index 65% rename from site/content/en/docs/Reference/Commands/pause.md rename to site/content/en/docs/commands/pause.md index 5850a5617d..7a6bc5cbcb 100644 --- a/site/content/en/docs/Reference/Commands/pause.md +++ b/site/content/en/docs/commands/pause.md @@ -1,19 +1,18 @@ --- title: "pause" -linkTitle: "pause" -weight: 1 -date: 2020-02-05 description: > - pause the Kubernetes control plane or other namespaces + pause containers --- -### Overview -The pause command allows you to freeze containers using the Linux [cgroup freezer](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt). Once frozen, processes will no longer consume CPU cycles, but will remain in memory. -By default, the pause command will pause the Kubernetes control plane (kube-system namespace), leaving your applications running. This reduces the background CPU usage of a minikube cluster to a negligible 2-3% of a CPU. +## minikube pause -### Usage +pause containers + +### Synopsis + +pause containers ``` minikube pause [flags] @@ -31,6 +30,7 @@ minikube pause [flags] ``` --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) --log_dir string If non-empty, write log files in this directory --logtostderr log to standard error instead of files @@ -40,7 +40,3 @@ minikube pause [flags] --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` -### SEE ALSO - -* [unpause](unpause.md) - diff --git a/site/content/en/docs/commands/podman-env.md b/site/content/en/docs/commands/podman-env.md new file mode 100644 index 0000000000..ab5e7769a8 --- /dev/null +++ b/site/content/en/docs/commands/podman-env.md @@ -0,0 +1,42 @@ +--- +title: "podman-env" +description: > + Sets up podman env variables; similar to '$(podman-machine env)' +--- + + + +## minikube podman-env + +Sets up podman env variables; similar to '$(podman-machine env)' + +### Synopsis + +Sets up podman env variables; similar to '$(podman-machine env)'. + +``` +minikube podman-env [flags] +``` + +### Options + +``` + -h, --help help for podman-env + --shell string Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect + -u, --unset Unset variables instead of setting them +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/commands/profile.md b/site/content/en/docs/commands/profile.md new file mode 100644 index 0000000000..8dc67629c6 --- /dev/null +++ b/site/content/en/docs/commands/profile.md @@ -0,0 +1,106 @@ +--- +title: "profile" +description: > + Profile gets or sets the current minikube profile +--- + + + +## minikube profile + +Profile gets or sets the current minikube profile + +### Synopsis + +profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default` + +``` +minikube profile [MINIKUBE_PROFILE_NAME]. You can return to the default minikube profile by running `minikube profile default` [flags] +``` + +### Options + +``` + -h, --help help for profile +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube profile help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type profile help [path to command] for full details. + +``` +minikube profile help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube profile list + +Lists all minikube profiles. + +### Synopsis + +Lists all valid minikube profiles and detects all possible invalid profiles. + +``` +minikube profile list [flags] +``` + +### Options + +``` + -h, --help help for list + -o, --output string The output format. 
One of 'json', 'table' (default "table") +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/commands/service.md b/site/content/en/docs/commands/service.md new file mode 100644 index 0000000000..7b95e7b85e --- /dev/null +++ b/site/content/en/docs/commands/service.md @@ -0,0 +1,114 @@ +--- +title: "service" +description: > + Gets the kubernetes URL(s) for the specified service in your local cluster +--- + + + +## minikube service + +Gets the kubernetes URL(s) for the specified service in your local cluster + +### Synopsis + +Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time. + +``` +minikube service [flags] SERVICE +``` + +### Options + +``` + --format string Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}") + -h, --help help for service + --https Open the service URL with https instead of http + --interval int The initial time interval for each check that wait performs in seconds (default 1) + -n, --namespace string The service namespace (default "default") + --url Display the kubernetes service URL in the CLI instead of opening it in the default browser + --wait int Amount of time to wait for a service in seconds (default 2) +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube service help + +Help about any command + +### Synopsis + +Help provides help for any command in the application. +Simply type service help [path to command] for full details. + +``` +minikube service help [command] [flags] +``` + +### Options + +``` + -h, --help help for help +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. 
(default "kubeadm") + --format string Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +## minikube service list + +Lists the URLs for the services in your local cluster + +### Synopsis + +Lists the URLs for the services in your local cluster + +``` +minikube service list [flags] +``` + +### Options + +``` + -h, --help help for list + -n, --namespace string The services namespace +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --format string Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time. (default "http://{{.IP}}:{{.Port}}") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/ssh-key.md b/site/content/en/docs/commands/ssh-key.md similarity index 84% rename from site/content/en/docs/Reference/Commands/ssh-key.md rename to site/content/en/docs/commands/ssh-key.md index 271c736878..dba64aadbf 100644 --- a/site/content/en/docs/Reference/Commands/ssh-key.md +++ b/site/content/en/docs/commands/ssh-key.md @@ -1,18 +1,29 @@ --- title: "ssh-key" -linkTitle: "sshs-key" -weight: 1 -date: 2019-08-01 description: > Retrieve the ssh identity key path of the specified cluster --- -### Usage + + +## minikube ssh-key + +Retrieve the ssh identity key path of the specified cluster + +### Synopsis + +Retrieve the ssh identity key path of the specified cluster. 
``` minikube ssh-key [flags] ``` +### Options + +``` + -h, --help help for ssh-key +``` + ### Options inherited from parent commands ``` @@ -26,3 +37,4 @@ minikube ssh-key [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/commands/ssh.md b/site/content/en/docs/commands/ssh.md new file mode 100644 index 0000000000..d1d99ebe69 --- /dev/null +++ b/site/content/en/docs/commands/ssh.md @@ -0,0 +1,42 @@ +--- +title: "ssh" +description: > + Log into or run a command on a machine with SSH; similar to 'docker-machine ssh' +--- + + + +## minikube ssh + +Log into or run a command on a machine with SSH; similar to 'docker-machine ssh' + +### Synopsis + +Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'. + +``` +minikube ssh [flags] +``` + +### Options + +``` + -h, --help help for ssh + --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true) + -n, --node string The node to ssh into. Defaults to the primary control plane. +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/start.md b/site/content/en/docs/commands/start.md similarity index 70% rename from site/content/en/docs/Reference/Commands/start.md rename to site/content/en/docs/commands/start.md index 4f736678ee..839be1e167 100644 --- a/site/content/en/docs/Reference/Commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -1,13 +1,18 @@ --- title: "start" -linkTitle: "start" -weight: 1 -date: 2019-08-01 description: > - Starts a local Kubernetes cluster + Starts a local kubernetes cluster --- -### Usage + + +## minikube start + +Starts a local kubernetes cluster + +### Synopsis + +Starts a local kubernetes cluster ``` minikube start [flags] @@ -16,16 +21,17 @@ minikube start [flags] ### Options ``` - --addons minikube addons list Enable addons. see minikube addons list for a list of valid addon names. + --addons minikube addons list Enable addons. see minikube addons list for a list of valid addon names. --apiserver-ips ipSlice A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine (default []) - --apiserver-name string The apiserver name which is used in the generated certificate for kubernetes. 
This can be used if you want to make the apiserver available from outside the machine (default "minikubeCA") + --apiserver-name string The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine (default "minikubeCA") --apiserver-names stringArray A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine --apiserver-port int The apiserver listening port (default 8443) --auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true) - --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --vm-driver=none. (default true) + --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) --container-runtime string The container runtime to be used (docker, crio, containerd). (default "docker") - --cpus int Number of CPUs allocated to the minikube VM. (default 2) + --cpus int Number of CPUs allocated to Kubernetes. (default 2) --cri-socket string The cri socket path to be used. + --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. --disable-driver-mounts Disables the filesystem mounts provided by the hypervisors --disk-size string Disk size allocated to the minikube VM (format: [], where unit = b, k, m or g). (default "20000mb") --dns-domain string The cluster dns domain name used in the kubernetes cluster (default "cluster.local") @@ -33,17 +39,14 @@ minikube start [flags] --docker-env stringArray Environment variables to pass to the Docker daemon. (format: key=value) --docker-opt stringArray Specify arbitrary flags to pass to the Docker daemon. (format: key=value) --download-only If true, only download and cache files for later use - don't install or start anything. + --driver string Used to specify the driver to run kubernetes in. The list of available drivers depends on operating system. --dry-run dry-run mode. Validates configuration, but does not mutate system state --embed-certs if true, will embed the certs in kubeconfig. --enable-default-cni Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with "--network-plugin=cni". --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. - - The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. - - Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, kube-proxy, scheduler - - Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, experimental-upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr - + The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. + Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler + Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, experimental-upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr --feature-gates string A set of key=value pairs that describe feature gates for alpha/experimental features. 
--force Force minikube to perform possibly dangerous operations -h, --help help for start @@ -52,33 +55,40 @@ minikube start [flags] --host-only-nic-type string NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") --hyperkit-vpnkit-sock string Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only) --hyperkit-vsock-ports strings List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only) + --hyperv-external-adapter string External Adapter on which external switch will be created if no external switch is found. (hyperv driver only) + --hyperv-use-external-switch Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only) --hyperv-virtual-switch string The hyperv virtual switch name. Defaults to first found. (hyperv driver only) --image-mirror-country string Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn. --image-repository string Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to "auto" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers --insecure-registry strings Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added. + --install-addons If set, install addons. Defaults to true. (default true) --interactive Allow user prompts for more information (default true) - --iso-url string Location of the minikube iso. (default "https://storage.googleapis.com/minikube/iso/minikube-v1.7.0.iso") + --iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.9.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.9.0/minikube-v1.9.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.9.0.iso]) --keep-context This will keep the existing kubectl context and will create a minikube context. - --kubernetes-version string The kubernetes version that the minikube VM will use (ex: v1.2.3) + --kubernetes-version string The kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.18.0, 'latest' for v1.18.0). Defaults to 'stable'. --kvm-gpu Enable experimental NVIDIA GPU support in minikube --kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only) --kvm-network string The KVM network name. (kvm2 driver only) (default "default") --kvm-qemu-uri string The KVM QEMU connection URI. (kvm2 driver only) (default "qemu:///system") - --memory string Amount of RAM allocated to the minikube VM (format: [], where unit = b, k, m or g). (default "2000mb") + --memory string Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). --mount This will start the mount daemon and automatically mount files into minikube. - --mount-string string The argument to pass the minikube mount command on start. (default "/Users:/minikube-host") + --mount-string string The argument to pass the minikube mount command on start. --nat-nic-type string NIC Type used for host only network. 
One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true) --network-plugin string The name of the network plugin. --nfs-share strings Local folders to share with Guest via NFS mounts (hyperkit driver only) --nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares") --no-vtx-check Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only) + -n, --nodes int The number of nodes to spin up. Defaults to 1. (default 1) + --preload If set, download tarball of preloaded images if available to improve start time. Defaults to true. (default true) --registry-mirror strings Registry mirrors to pass to the Docker daemon --service-cluster-ip-range string The CIDR to be used for service cluster IPs. (default "10.96.0.0/12") --uuid string Provide VM UUID to restore MAC address (hyperkit driver only) - --vm-driver string Driver is one of: virtualbox, parallels, vmwarefusion, hyperkit, vmware, docker (experimental) (defaults to auto-detect) - --wait Block until the apiserver is servicing API requests (default true) - --wait-timeout duration max time to wait per Kubernetes core services to be healthy. (default 6m0s)``` + --vm Filter to use only VM Drivers + --vm-driver driver DEPRECATED, use driver instead. + --wait strings comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to "apiserver,system_pods", available options: "apiserver,system_pods,default_sa,apps_running" . other acceptable values are 'all' or 'none', 'true' and 'false' (default [apiserver,system_pods]) + --wait-timeout duration max time to wait per Kubernetes core services to be healthy. (default 6m0s) +``` ### Options inherited from parent commands @@ -93,3 +103,4 @@ minikube start [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/profile.md b/site/content/en/docs/commands/status.md similarity index 51% rename from site/content/en/docs/Reference/Commands/profile.md rename to site/content/en/docs/commands/status.md index 88505fcf35..fd1462f33d 100644 --- a/site/content/en/docs/Reference/Commands/profile.md +++ b/site/content/en/docs/commands/status.md @@ -1,27 +1,33 @@ --- -title: "profile" -linkTitle: "profile" -weight: 1 -date: 2019-08-01 +title: "status" description: > - Profile gets or sets the current minikube profile + Gets the status of a local kubernetes cluster --- -### Overview -profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default` -### Usage +## minikube status + +Gets the status of a local kubernetes cluster + +### Synopsis + +Gets the status of a local kubernetes cluster. + Exit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left. 
+ Eg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK) ``` -minikube profile [MINIKUBE_PROFILE_NAME] - -You can return to the default minikube profile by running `minikube profile default` [flags] +minikube status [flags] ``` -## Subcommands +### Options -- **list**: Lists all minikube profiles. +``` + -f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ + For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\n\n") + -h, --help help for status + -o, --output string minikube status --output OUTPUT. json, text (default "text") +``` ### Options inherited from parent commands @@ -37,22 +43,3 @@ You can return to the default minikube profile by running `minikube profile defa --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` - -## minikube profile list - -Lists all minikube profiles. - -### Overview - -Lists all valid minikube profiles and detects all possible invalid profiles. - -``` -minikube profile list [flags] -``` - -### Options - -``` - -h, --help help for list - -o, --output string The output format. One of 'json', 'table' (default "table") -``` diff --git a/site/content/en/docs/Reference/Commands/stop.md b/site/content/en/docs/commands/stop.md similarity index 83% rename from site/content/en/docs/Reference/Commands/stop.md rename to site/content/en/docs/commands/stop.md index 0bb9e63fba..08e190832b 100644 --- a/site/content/en/docs/Reference/Commands/stop.md +++ b/site/content/en/docs/commands/stop.md @@ -1,23 +1,30 @@ --- title: "stop" -linkTitle: "stop" -weight: 1 -date: 2019-08-01 description: > - Stops a running local Kubernetes cluster + Stops a running local kubernetes cluster --- -### Overview -Stops a local Kubernetes cluster running in Virtualbox. This command stops the VM + +## minikube stop + +Stops a running local kubernetes cluster + +### Synopsis + +Stops a local kubernetes cluster running in Virtualbox. This command stops the VM itself, leaving all files intact. The cluster can be started again with the "start" command. 
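The bit encoding described in the `minikube status` synopsis above can be decoded from the command's exit code in a script. A minimal sketch, assuming the 1/2/4 bit layout stated in that synopsis:

```
# Decode the combined exit status of `minikube status`
# (per the synopsis: 1 = minikube NOK, 2 = cluster NOK, 4 = kubernetes NOK).
minikube status
code=$?
(( code & 1 )) && echo "minikube host is not OK"
(( code & 2 )) && echo "cluster is not OK"
(( code & 4 )) && echo "kubernetes is not OK"
```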
-### Usage - ``` minikube stop [flags] ``` +### Options + +``` + -h, --help help for stop +``` + ### Options inherited from parent commands ``` @@ -31,3 +38,4 @@ minikube stop [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/tunnel.md b/site/content/en/docs/commands/tunnel.md similarity index 85% rename from site/content/en/docs/Reference/Commands/tunnel.md rename to site/content/en/docs/commands/tunnel.md index e33e4d80a1..72b73a316b 100644 --- a/site/content/en/docs/Reference/Commands/tunnel.md +++ b/site/content/en/docs/commands/tunnel.md @@ -1,17 +1,18 @@ --- title: "tunnel" -linkTitle: "tunnel" -weight: 1 -date: 2019-08-01 description: > tunnel makes services of type LoadBalancer accessible on localhost --- -### Overview -tunnel creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP -### Usage +## minikube tunnel + +tunnel makes services of type LoadBalancer accessible on localhost + +### Synopsis + +tunnel creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. for a detailed example see https://minikube.sigs.k8s.io/docs/tasks/loadbalancer ``` minikube tunnel [flags] diff --git a/site/content/en/docs/Reference/Commands/unpause.md b/site/content/en/docs/commands/unpause.md similarity index 89% rename from site/content/en/docs/Reference/Commands/unpause.md rename to site/content/en/docs/commands/unpause.md index 1b71bce9df..3ae61076af 100644 --- a/site/content/en/docs/Reference/Commands/unpause.md +++ b/site/content/en/docs/commands/unpause.md @@ -1,14 +1,18 @@ --- title: "unpause" -linkTitle: "unpause" -weight: 1 -date: 2020-02-05 description: > - unpause the Kubernetes control plane or other namespaces - + unpause Kubernetes --- -### Usage + + +## minikube unpause + +unpause Kubernetes + +### Synopsis + +unpause Kubernetes ``` minikube unpause [flags] @@ -36,7 +40,3 @@ minikube unpause [flags] --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` -### SEE ALSO - -* [pause](pause.md) - diff --git a/site/content/en/docs/commands/update-check.md b/site/content/en/docs/commands/update-check.md new file mode 100644 index 0000000000..c472068813 --- /dev/null +++ b/site/content/en/docs/commands/update-check.md @@ -0,0 +1,40 @@ +--- +title: "update-check" +description: > + Print current and latest version number +--- + + + +## minikube update-check + +Print current and latest version number + +### Synopsis + +Print current and latest version number + +``` +minikube update-check [flags] +``` + +### Options + +``` + -h, --help help for update-check +``` + +### Options inherited from parent commands + +``` + --alsologtostderr log to standard error as well as files + -b, --bootstrapper string The name of the cluster bootstrapper that will set up the kubernetes cluster. (default "kubeadm") + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + -p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. 
(default "minikube") + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + diff --git a/site/content/en/docs/Reference/Commands/update-context.md b/site/content/en/docs/commands/update-context.md similarity index 76% rename from site/content/en/docs/Reference/Commands/update-context.md rename to site/content/en/docs/commands/update-context.md index ba6e3c0b23..4e200a747b 100644 --- a/site/content/en/docs/Reference/Commands/update-context.md +++ b/site/content/en/docs/commands/update-context.md @@ -1,19 +1,31 @@ --- title: "update-context" -linkTitle: "update-context" -weight: 1 -date: 2019-08-01 description: > Verify the IP address of the running cluster in kubeconfig. --- -The `update-context` command retrieves the IP address of the running cluster, checks it with IP in kubeconfig, and corrects kubeconfig if incorrect: + + +## minikube update-context + +Verify the IP address of the running cluster in kubeconfig. + +### Synopsis + +Retrieves the IP address of the running cluster, checks it + with IP in kubeconfig, and corrects kubeconfig if incorrect. ``` minikube update-context [flags] ``` -## Options inherited from parent commands +### Options + +``` + -h, --help help for update-context +``` + +### Options inherited from parent commands ``` --alsologtostderr log to standard error as well as files @@ -26,3 +38,4 @@ minikube update-context [flags] -v, --v Level log level for V logs --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging ``` + diff --git a/site/content/en/docs/Reference/Commands/version.md b/site/content/en/docs/commands/version.md similarity index 82% rename from site/content/en/docs/Reference/Commands/version.md rename to site/content/en/docs/commands/version.md index df909b4153..4aeddee307 100644 --- a/site/content/en/docs/Reference/Commands/version.md +++ b/site/content/en/docs/commands/version.md @@ -1,8 +1,16 @@ +--- +title: "version" +description: > + Print the version of minikube +--- + + + ## minikube version Print the version of minikube -### Overview +### Synopsis Print the version of minikube. @@ -13,7 +21,9 @@ minikube version [flags] ### Options ``` - -h, --help help for version + -h, --help help for version + -o, --output string One of 'yaml' or 'json'. + --short Print just the version number. 
``` ### Options inherited from parent commands diff --git a/site/content/en/docs/Contributing/_index.md b/site/content/en/docs/contrib/_index.md similarity index 78% rename from site/content/en/docs/Contributing/_index.md rename to site/content/en/docs/contrib/_index.md index 7aef294ff1..46ef7101d3 100644 --- a/site/content/en/docs/Contributing/_index.md +++ b/site/content/en/docs/contrib/_index.md @@ -4,4 +4,6 @@ linkTitle: "Contributing" weight: 10 description: > How to contribute to minikube +aliases: + - /docs/contributing --- diff --git a/site/content/en/docs/Contributing/addons.en.md b/site/content/en/docs/contrib/addons.en.md similarity index 100% rename from site/content/en/docs/Contributing/addons.en.md rename to site/content/en/docs/contrib/addons.en.md diff --git a/site/content/en/docs/contrib/building/_index.md b/site/content/en/docs/contrib/building/_index.md new file mode 100644 index 0000000000..c9faa7ba02 --- /dev/null +++ b/site/content/en/docs/contrib/building/_index.md @@ -0,0 +1,5 @@ +--- +title: "Building" +linkTitle: "Building" +weight: 2 +--- diff --git a/site/content/en/docs/Contributing/building.en.md b/site/content/en/docs/contrib/building/binaries.md similarity index 100% rename from site/content/en/docs/Contributing/building.en.md rename to site/content/en/docs/contrib/building/binaries.md diff --git a/site/content/en/docs/Contributing/iso.md b/site/content/en/docs/contrib/building/iso.md similarity index 99% rename from site/content/en/docs/Contributing/iso.md rename to site/content/en/docs/contrib/building/iso.md index f764d05388..3d5eabe70b 100644 --- a/site/content/en/docs/Contributing/iso.md +++ b/site/content/en/docs/contrib/building/iso.md @@ -1,5 +1,5 @@ --- -linkTitle: "ISO" +linkTitle: "ISO Build" title: "Building the minikube ISO" date: 2019-08-09 weight: 4 diff --git a/site/content/en/docs/Contributing/documentation.en.md b/site/content/en/docs/contrib/documentation.en.md similarity index 80% rename from site/content/en/docs/Contributing/documentation.en.md rename to site/content/en/docs/contrib/documentation.en.md index b2cd2b9971..f2bab66420 100644 --- a/site/content/en/docs/Contributing/documentation.en.md +++ b/site/content/en/docs/contrib/documentation.en.md @@ -12,13 +12,20 @@ minikube's documentation is in [Markdown](https://www.markdownguide.org/cheat-sh In production, the minikube website is served using [Netlify](https://netlify.com/) +## Small or cosmetic contributions + +Use Github's repositories and markdown editor as described by [Kubernetes's general guideline for documentation contributing](https://kubernetes.io/docs/contribute/start/#submit-a-pull-request) + ## Local documentation website To serve documentation pages locally, clone the `minikube` repository and run: `make site` -NOTE: On Windows, our site currently causes Hugo to `panic`. +Notes : + +* On GNU/Linux, golang package shipped with the distribution may not be recent enough. Use the latest version. +* On Windows, our site currently causes Hugo to `panic`. 
## Lint diff --git a/site/content/en/docs/Contributing/drivers.en.md b/site/content/en/docs/contrib/drivers.en.md similarity index 100% rename from site/content/en/docs/Contributing/drivers.en.md rename to site/content/en/docs/contrib/drivers.en.md diff --git a/site/content/en/docs/Contributing/guide.en.md b/site/content/en/docs/contrib/guide.en.md similarity index 100% rename from site/content/en/docs/Contributing/guide.en.md rename to site/content/en/docs/contrib/guide.en.md diff --git a/site/content/en/docs/Concepts/principles.en.md b/site/content/en/docs/contrib/principles.en.md similarity index 81% rename from site/content/en/docs/Concepts/principles.en.md rename to site/content/en/docs/contrib/principles.en.md index 7ba4e884ed..a97404e6a1 100644 --- a/site/content/en/docs/Concepts/principles.en.md +++ b/site/content/en/docs/contrib/principles.en.md @@ -1,17 +1,18 @@ --- title: "Principles" -date: 2019-06-18T15:31:58+08:00 +aliases: + - /docs/concepts/principles --- The primary goal of minikube is to make it simple to run Kubernetes locally, for day-to-day development workflows and learning purposes. Here are the guiding principles for minikube, in rough priority order: 1. Inclusive and community-driven -1. User-friendly -1. Support all Kubernetes features -1. Cross-platform -1. Reliable -1. High Performance -1. Developer Focused +2. User-friendly +3. Support all Kubernetes features +4. Cross-platform +5. Reliable +6. High Performance +7. Developer Focused Here are some specific minikube features that align with our goal: diff --git a/site/content/en/docs/contrib/releasing/_index.md b/site/content/en/docs/contrib/releasing/_index.md new file mode 100644 index 0000000000..0c2338eea5 --- /dev/null +++ b/site/content/en/docs/contrib/releasing/_index.md @@ -0,0 +1,5 @@ +--- +title: "Releasing" +linkTitle: "Releasing" +weight: 99 +--- diff --git a/site/content/en/docs/Contributing/releasing.en.md b/site/content/en/docs/contrib/releasing/binaries.md similarity index 86% rename from site/content/en/docs/Contributing/releasing.en.md rename to site/content/en/docs/contrib/releasing/binaries.md index e4d8a3a52c..1f5702dfed 100644 --- a/site/content/en/docs/Contributing/releasing.en.md +++ b/site/content/en/docs/contrib/releasing/binaries.md @@ -1,9 +1,8 @@ --- -title: "Releasing" -date: 2019-07-31 +title: "Binaries" weight: 9 description: > - How to release minikube + How to release minikube binaries --- ## Preparation @@ -16,19 +15,7 @@ description: > ## Build a new ISO -Major releases always get a new ISO. Minor bugfixes may or may not require it: check for changes in the `deploy/iso` folder. -To check, run `git log -- deploy/iso` from the root directory and see if there has been a commit since the most recent release. - -Note: you can build the ISO using the `hack/jenkins/build_iso.sh` script locally. - -* Navigate to the minikube ISO jenkins job -* Ensure that you are logged in (top right) -* Click "▶️ Build with Parameters" (left) -* For `ISO_VERSION`, type in the intended release version (same as the minikube binary's version) -* For `ISO_BUCKET`, type in `minikube/iso` -* Click *Build* - -The build will take roughly 50 minutes. +Major releases always get a new ISO. 
See [ISO release instructions]({{}}) ## Update Makefile diff --git a/site/content/en/docs/Contributing/gvisor.md b/site/content/en/docs/contrib/releasing/gvisor.md similarity index 100% rename from site/content/en/docs/Contributing/gvisor.md rename to site/content/en/docs/contrib/releasing/gvisor.md diff --git a/site/content/en/docs/contrib/releasing/iso.md b/site/content/en/docs/contrib/releasing/iso.md new file mode 100644 index 0000000000..5c1fdc1963 --- /dev/null +++ b/site/content/en/docs/contrib/releasing/iso.md @@ -0,0 +1,19 @@ +--- +title: "ISO" +description: > + How to release a new minikube ISO +--- + +Major releases always get a new ISO. Minor bugfixes may or may not require it: check for changes in the `deploy/iso` folder. +To check, run `git log -- deploy/iso` from the root directory and see if there has been a commit since the most recent release. + +Note: you can build the ISO using the `hack/jenkins/build_iso.sh` script locally. + +* Navigate to the minikube ISO jenkins job +* Ensure that you are logged in (top right) +* Click "▶️ Build with Parameters" (left) +* For `ISO_VERSION`, type in the intended release version (same as the minikube binary's version) +* For `ISO_BUCKET`, type in `minikube/iso` +* Click *Build* + +The build will take roughly 50 minutes. diff --git a/site/content/en/docs/Contributing/roadmap.en.md b/site/content/en/docs/contrib/roadmap.en.md similarity index 88% rename from site/content/en/docs/Contributing/roadmap.en.md rename to site/content/en/docs/contrib/roadmap.en.md index 37b135a78f..d169145a59 100644 --- a/site/content/en/docs/Contributing/roadmap.en.md +++ b/site/content/en/docs/contrib/roadmap.en.md @@ -14,9 +14,9 @@ Please send a PR to suggest any improvements to it. ## (#1) Inclusive and community-driven -- [ ] Maintainers from 4 countries, 4 companies +- [x] Maintainers from 4 countries, 4 companies - [ ] Installation documentation in 5+ written languages -- [ ] Enhancements approved by a community-driven process +- [x] Enhancements approved by a community-driven process ## (#2) User-friendly @@ -33,7 +33,7 @@ Please send a PR to suggest any improvements to it. ## (#4) Cross-platform -- [ ] VM-free deployment to containers (Docker, Podman) +- [x] VM-free deployment to containers (Docker, Podman) - [ ] Windows as a first-class citizen - [ ] WSL2 support (no additional VM required) - [ ] Firecracker VM support @@ -49,7 +49,7 @@ Please send a PR to suggest any improvements to it. - [ ] Startup latency under 30s - [ ] Kernel-assisted mounts (CIFS, NFS) by default -- [ ] Suspend and Resume +- [x] Pause support - [ ] <25% CPU overhead on a single core ## (#7) Developer Focused diff --git a/site/content/en/docs/Contributing/testing.en.md b/site/content/en/docs/contrib/testing.en.md similarity index 100% rename from site/content/en/docs/Contributing/testing.en.md rename to site/content/en/docs/contrib/testing.en.md diff --git a/site/content/en/docs/contrib/translations.md b/site/content/en/docs/contrib/translations.md new file mode 100644 index 0000000000..38f5016600 --- /dev/null +++ b/site/content/en/docs/contrib/translations.md @@ -0,0 +1,92 @@ +--- +title: "Translations" +date: 2019-09-30 +weight: 3 +description: > + How to add translations +--- + +All translations are stored in the top-level `translations` directory. + +### Adding a New Language +* Add a new json file in the translations directory with the locale code of the language you want to add + translations for, e.g. fr for French. 
+ ``` + ~/minikube$ touch translations/fr.json + ~/minikube$ ls translations/ + de.json es.json fr.json ja.json ko.json pl.json zh-CN.json + ``` +* Run `make extract` from root to populate that file with the strings to translate in json + form. + ``` + ~/minikube$ make extract + go run cmd/extract/extract.go + Compiling translation strings... + Writing to de.json + Writing to es.json + Writing to fr.json + Writing to ja.json + Writing to ko.json + Writing to pl.json + Writing to zh-CN.json + Done! + ``` +* Add translated strings to the json file as the value of the map where the English phrase is the key. + ``` + ~/minikube$ head translations/fr.json + { + "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", + "\"{{.name}}\" profile does not exist, trying anyways.": "", + "'none' driver does not support 'minikube docker-env' command": "", + "'none' driver does not support 'minikube mount' command": "", + "'none' driver does not support 'minikube podman-env' command": "", + "'none' driver does not support 'minikube ssh' command": "", + "'{{.driver}}' driver reported an issue: {{.error}}": "", + ``` + * Add the translations as the values of the map, keeping in mind that anything in double braces `{{}}` are variable names describing what gets injected and should not be translated. + ``` + ~/minikube$ vi translations/fr.json + { + [...] + "Amount of time to wait for a service in seconds": "", + "Amount of time to wait for service in seconds": "", + "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Automatically selected the {{.driver}} driver": "Choix automatique du driver {{.driver}}", + "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du driver {{.driver}}. Autres choix: {{.alternatives}}", + "Available Commands": "", + "Basic Commands:": "", + "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", + [...] + } + ``` + +### Adding Translations To an Existing Language +* Run `make extract` to make sure all strings are up to date +* Edit the appropriate json file in the 'translations' directory, in the same way as described above. + +### Testing translations +* Once you have all the translations you want, save the file and rebuild the minikube from scratch to pick up your new translations: + ``` + ~/minikube$ make clean + rm -rf ./out + rm -f pkg/minikube/assets/assets.go + rm -f pkg/minikube/translate/translations.go + rm -rf ./vendor + ~/minikube$ make + ``` + Note: the clean is required to regenerate the embedded `translations.go` file + +* You now have a fresh minikube binary in the `out` directory. If your system locale is that of the language you added translations for, a simple `out/minikube start` will work as a test, assuming you translated phrases from `minikube start`. You can use whatever command you'd like in that way. + +* If you have a different system locale, you can override the printed language using the LC_ALL environment variable: + ``` + ~/minikube$ LC_ALL=fr out/minikube start + 😄 minikube v1.9.2 sur Darwin 10.14.5 + ✨ Choix automatique du driver hyperkit. Autres choix: docker + 👍 Démarrage du noeud de plan de contrôle minikube dans le cluster minikube + 🔥 Création de VM hyperkit (CPUs=2, Mémoire=4000MB, Disque=20000MB)... + 🐳 Préparation de Kubernetes v1.18.0 sur Docker 19.03.8... 
+ 🌟 Installation des addons: default-storageclass, storage-provisioner + 🏄 Terminé ! kubectl est maintenant configuré pour utiliser "minikube". + ``` diff --git a/site/content/en/docs/contrib/triage.md b/site/content/en/docs/contrib/triage.md new file mode 100644 index 0000000000..93b3c403e5 --- /dev/null +++ b/site/content/en/docs/contrib/triage.md @@ -0,0 +1,219 @@ +--- +linkTitle: "Triage" +title: "Triaging Minikube Issues" +date: 2020-03-17 +weight: 10 +description: > + How to triage issues in the minikube repo +--- + +Triage is an important part of maintaining the health of the minikube repo. +A well organized repo allows maintainers to prioritize feature requests, fix bugs, and respond to users facing difficulty with the tool as quickly as possible. + +Triage includes: +- Labeling issues +- Responding to issues +- Closing issues + +If you're interested in helping out with minikube triage, this doc covers the basics of doing so. + +Additionally, if you'd be interested in participating in our weekly triage meeting, please fill out this [form](https://forms.gle/vNtWZSWXqeYaaNbU9) to express interest. Thank you! + +# Daily Triage +Daily triage has two goals: + +1. Responsiveness for new issues +1. Responsiveness when explicitly requested information was provided + +The list of outstanding items are at http://tinyurl.com/mk-tparty/daily-triage - it covers: + +1. Issues without a `kind/` or `triage/` label +1. Issues without a `priority/` label +1. `triage/needs-information` issues which the user has followed up on, and now require a response. + +## Categorization + +The most important level of categorizing the issue is defining what type it is. +We typically want at least one of the following labels on every issue, and some issues may fall into multiple categories: + +- `triage/support` - The default for most incoming issues +- `kind/bug` - When it’s a bug or we aren’t delivering the best user experience + +Other possibilities: +- `kind/feature`- Identify new feature requests +- `kind/flake` - Used for flaky integration or unit tests +- `kind/cleanup` - Cleaning up/refactoring the codebase +- `kind/documentation` - Updates or additions to minikube documentation +- `kind/ux` - Issues that involve improving user experience +- `kind/security` - When there's a security vulnerability in minikube + +If the issue is specific to an operating system, hypervisor, container, addon, or Kubernetes component: + +**os/[operating system]** - When the issue appears specific to an operating system + + - `os/linux` + - `os/macos` + - `os/windows` + +**co/[driver]** - When the issue appears specific to a driver + + - `co/hyperkit` + - `co/hyperv` + - `co/kvm2` + - `co/none-driver` + - `co/docker-driver` + - `co/podman-driver` + - `co/virtualbox` + +**co/[kubernetes component]** - When the issue appears specific to a k8s component + + - `co/apiserver` + - `co/etcd` + - `co/coredns` + - `co/dashboard` + - `co/kube-proxy` + - `co/kubeadm` + - `co/kubelet` + - `co/kubeconfig` + + +Other useful tags: + +Did an **Event** occur that we can dedup similar issues against? + +- `ev/CrashLoopBackoff` +- `ev/Panic` +- `ev/Pending` +- `ev/kubeadm-exit-1` + +Suspected **Root cause**: + +- `cause/vm-environment` +- `cause/invalid-kubelet-options` + +**Help wanted?** + +`Good First Issue` - bug has a proposed solution, can be implemented w/o further discussion. 
+ +`Help wanted` - if the bug could use help from a contributor + + +## Prioritization +If the issue is not `triage/support`, it needs a [priority label](https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md#define-priority): + +`priority/critical-urgent` - someones top priority ASAP, such as security issue, user-visible bug, or build breakage. Rarely used. + +`priority/important-soon`: in time for the next two releases (8 weeks) + +`priority/important-longterm`: 2-4 releases from now + +`priority/backlog`: agreed that this would be good to have, but no one is available at the moment. Consider tagging as `help wanted` + +`priority/awaiting-more-evidence`: may be more useful, but there is not yet enough support. + + +# Weekly Triage + +Weekly triage has three goals: + +1. Catching up on unresponded issues +1. Reviewing and closing PR’s +1. Closing stale issues + +The list of outstanding items can be found at http://tinyurl.com/mk-tparty/weekly-triage. + +## Post-Release Triage + +Post-release triage occurs after a major release (around every 4-6 weeks). +It focuses on: + +1. Closing bugs that have been resolved by the release +1. Reprioritizing bugs that have not been resolved by the release +1. Letting users know if we believe that there is still an issue + +This includes reviewing: + +1. Every issue that hasn’t been touched in the last 2 days +1. Re-evaluation of long-term issues +1. Re-evaluation of short-term issues + + +## Responding to Issues + +### Needs More Information +A sample response to ask for more info: + +> I don’t yet have a clear way to replicate this issue. Do you mind adding some additional details. Here is additional information that would be helpful: +> +> \* The exact `minikube start` command line used +> +> \* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=4` for extra logging. +> +> \* The full output of `minikube logs` +> +> \* The full output of `kubectl get po -A` +> +> +> +> Thank you for sharing your experience! + + +Then: Label with `triage/needs-information`. + +### Issue might be resolved +If you think a release may have resolved an issue, ask the author to see if their issue has been resolved: + +> Could you please check to see if minikube addresses this issue? We've made some changes with how this is handled, and improved the minikube logs output to help us debug tricky cases like this. + +Then: Label with `triage/needs-information`. + + +## Closing with Care + +Issues typically need to be closed for the following reasons: + +- The issue has been addressed +- The issue is a duplicate of an existing issue +- There has been a lack of information over a long period of time + +In any of these situations, we aim to be kind when closing the issue, and offer the author action items should they need to reopen their issue or still require a solution. + +Samples responses for these situations include: + +### Issue has been addressed + +>@author: I believe this issue is now addressed by minikube v1.4, as it . If you still see this issue with minikube v1.4 or higher, please reopen this issue by commenting with `/reopen` +> +>Thank you for reporting this issue! + +Then: Close the issue + +### Duplicate Issue + +>This issue appears to be a duplicate of #X, do you mind if we move the conversation there? +> +>This way we can centralize the content relating to the issue. If you feel that this issue is not in fact a duplicate, please re-open it using `/reopen`. 
If you have additional information to share, please add it to the new issue. +> +>Thank you for reporting this! + +Then: Label with `triage/duplicate` and close the issue. + +### Lack of Information +If an issue hasn't been active for more than four weeks, and the author has been pinged at least once, then the issue can be closed. + +>Hey @author -- hopefully it's OK if I close this - there wasn't enough information to make it actionable, and some time has already passed. If you are able to provide additional details, you may reopen it at any point by adding /reopen to your comment. +> +>Here is additional information that may be helpful to us: +> +>\* Whether the issue occurs with the latest minikube release +> +>\* The exact `minikube start` command line used +> +>\* The full output of the `minikube start` command, preferably with `--alsologtostderr -v=3` for extra logging. +> +>\* The full output of `minikube logs` +> +> +>Thank you for sharing your experience! + +Then: Close the issue. diff --git a/site/content/en/docs/drivers/_index.md b/site/content/en/docs/drivers/_index.md new file mode 100644 index 0000000000..21ef03d4b4 --- /dev/null +++ b/site/content/en/docs/drivers/_index.md @@ -0,0 +1,35 @@ +--- +title: "Drivers" +linkTitle: "Drivers" +weight: 8 +no_list: true +description: > + Configuring various minikube drivers +aliases: + - /docs/reference/drivers +--- +minikube can be deployed as a VM, a container, or bare-metal. + +To do so, we use the [Docker Machine](https://github.com/docker/machine) library to provide a consistent way to interact with different environments. Here is what's supported: + +## Linux + +* [Docker]({{}}) - container-based (preferred) +* [KVM2]({{}}) - VM-based (preferred) +* [VirtualBox]({{}}) - VM +* [None]({{}}) - bare-metal +* [Podman]({{}}) - container (experimental) + +## macOS + +* [Hyperkit]({{}}) - VM (preferred) +* [Docker]({{}}) - VM + Container +* [VirtualBox]({{}}) - FVM +* [Parallels]({{}}) - VM +* [VMware]({{}}) - VM + +## Windows + +* [Hyper-V]({{}}) - VM (preferred) +* [Docker]({{}}) - VM + Container (preferred) +* [VirtualBox]({{}}) - VM diff --git a/site/content/en/docs/drivers/docker.md b/site/content/en/docs/drivers/docker.md new file mode 100644 index 0000000000..248428753d --- /dev/null +++ b/site/content/en/docs/drivers/docker.md @@ -0,0 +1,37 @@ +--- +title: "docker" +weight: 3 +aliases: + - /docs/reference/drivers/docker +--- + +## Overview + +The Docker driver allows you to install Kubernetes into an existing Docker install. On Linux, this does not require virtualization to be enabled. + +{{% readfile file="/docs/drivers/includes/docker_usage.inc" %}} + +## Special features + +- Cross platform (linux, macOS, Windows) +- No hypervisor required when run on Linux +- Experimental support for [WSL2](https://docs.microsoft.com/en-us/windows/wsl/wsl2-install) on Windows 10 + +## Known Issues + +- On macOS, containers might get hung and require a restart of Docker for Desktop. See [docker/for-mac#1835](https://github.com/docker/for-mac/issues/1835) + +- The `ingress`, `ingress-dns` and `registry` addons are currently only supported on Linux. See [#7332](https://github.com/kubernetes/minikube/issues/7332) and [#7535](https://github.com/kubernetes/minikube/issues/7535) + +- On WSL2 (experimental - see [#5392](https://github.com/kubernetes/minikube/issues/5392)), you may need to run: + + `sudo mkdir /sys/fs/cgroup/systemd && sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd`. 
+ +- Addon 'registry' for mac and windows is not supported yet and it is [a work in progress](https://github.com/kubernetes/minikube/issues/7535). + + + +## Troubleshooting + +- On macOS or Windows, you may need to restart Docker for Desktop if a command gets hung +- Run `--alsologtostderr -v=1` for extra debugging information diff --git a/site/content/en/docs/Reference/Drivers/hyperkit.md b/site/content/en/docs/drivers/hyperkit.md similarity index 90% rename from site/content/en/docs/Reference/Drivers/hyperkit.md rename to site/content/en/docs/drivers/hyperkit.md index 2b72f11719..49be1a066f 100644 --- a/site/content/en/docs/Reference/Drivers/hyperkit.md +++ b/site/content/en/docs/drivers/hyperkit.md @@ -1,17 +1,15 @@ --- title: "hyperkit" -linkTitle: "hyperkit" weight: 1 -date: 2018-08-08 -description: > - HyperKit driver +aliases: + - /docs/reference/drivers/hyperkit --- ## Overview [HyperKit](https://github.com/moby/hyperkit) is an open-source hypervisor for macOS hypervisor, optimized for lightweight virtual machines and container deployment. -{{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/hyperkit_usage.inc" %}} ## Special features diff --git a/site/content/en/docs/Reference/Drivers/hyperv.md b/site/content/en/docs/drivers/hyperv.md similarity index 74% rename from site/content/en/docs/Reference/Drivers/hyperv.md rename to site/content/en/docs/drivers/hyperv.md index 909f1e03f8..78d5528ecd 100644 --- a/site/content/en/docs/Reference/Drivers/hyperv.md +++ b/site/content/en/docs/drivers/hyperv.md @@ -1,18 +1,14 @@ --- title: "hyperv" -linkTitle: "hyperv" weight: 2 -date: 2017-01-05 -date: 2018-08-05 -description: > - Microsoft Hyper-V driver +aliases: + - /docs/reference/drivers/hyperv --- - ## Overview Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows. -{{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/hyperv_usage.inc" %}} ## Special features diff --git a/site/content/en/docs/drivers/includes/check_baremetal_linux.inc b/site/content/en/docs/drivers/includes/check_baremetal_linux.inc new file mode 100644 index 0000000000..da6797ff8c --- /dev/null +++ b/site/content/en/docs/drivers/includes/check_baremetal_linux.inc @@ -0,0 +1,7 @@ +To use baremetal driver (none driver). verify that your operating system is Linux and also have 'systemd' installed. + +```shell +pidof systemd && echo "yes" || echo "no" +``` +If the above command outputs "no": +Your system is not suitable for none driver. \ No newline at end of file diff --git a/site/content/en/docs/drivers/includes/check_virtualization_linux.inc b/site/content/en/docs/drivers/includes/check_virtualization_linux.inc new file mode 100644 index 0000000000..3f60068016 --- /dev/null +++ b/site/content/en/docs/drivers/includes/check_virtualization_linux.inc @@ -0,0 +1,11 @@ +To use VM drivers, verify that your system has virtualization support enabled: + +```shell +egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no +``` + +If the above command outputs "no": + +- If you are running within a VM, your hypervisor does not allow nested virtualization. 
You will need to use the *None (bare-metal)* driver +- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled + diff --git a/site/content/en/docs/drivers/includes/check_virtualization_windows.inc b/site/content/en/docs/drivers/includes/check_virtualization_windows.inc new file mode 100644 index 0000000000..14812b61ec --- /dev/null +++ b/site/content/en/docs/drivers/includes/check_virtualization_windows.inc @@ -0,0 +1,19 @@ +To check if virtualization is supported, run the following command on your Windows terminal or command prompt. + +```shell +systeminfo +``` +If you see the following output, virtualization is supported: + +```shell +Hyper-V Requirements: VM Monitor Mode Extensions: Yes + Virtualization Enabled In Firmware: Yes + Second Level Address Translation: Yes + Data Execution Prevention Available: Yes +``` + +If you see the following output, your system already has a Hypervisor installed and you can skip the next step. + +```shell +Hyper-V Requirements: A hypervisor has been detected. +``` \ No newline at end of file diff --git a/site/content/en/docs/drivers/includes/docker_usage.inc b/site/content/en/docs/drivers/includes/docker_usage.inc new file mode 100644 index 0000000000..df96d517ec --- /dev/null +++ b/site/content/en/docs/drivers/includes/docker_usage.inc @@ -0,0 +1,16 @@ +## Install Docker + +- [Docker Desktop](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc) + +## Usage + +Start a cluster using the docker driver: + +```shell +minikube start --driver=docker +``` +To make docker the default driver: + +```shell +minikube config set driver docker +``` diff --git a/site/content/en/docs/Reference/Drivers/includes/hyperkit_usage.inc b/site/content/en/docs/drivers/includes/hyperkit_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/hyperkit_usage.inc rename to site/content/en/docs/drivers/includes/hyperkit_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/hyperv_usage.inc b/site/content/en/docs/drivers/includes/hyperv_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/hyperv_usage.inc rename to site/content/en/docs/drivers/includes/hyperv_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/kvm2_usage.inc b/site/content/en/docs/drivers/includes/kvm2_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/kvm2_usage.inc rename to site/content/en/docs/drivers/includes/kvm2_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/none_usage.inc b/site/content/en/docs/drivers/includes/none_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/none_usage.inc rename to site/content/en/docs/drivers/includes/none_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/parallels_usage.inc b/site/content/en/docs/drivers/includes/parallels_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/parallels_usage.inc rename to site/content/en/docs/drivers/includes/parallels_usage.inc diff --git a/site/content/en/docs/drivers/includes/podman_usage.inc b/site/content/en/docs/drivers/includes/podman_usage.inc new file mode 100644 index 0000000000..186c5bb604 --- /dev/null +++ b/site/content/en/docs/drivers/includes/podman_usage.inc @@ -0,0 +1,21 @@ +## experimental + +This is an experimental driver. please use it only for experimental reasons. 
+For a better Kubernetes-in-container experience, use the docker [driver](https://minikube.sigs.k8s.io/Drivers/docker). + +## Install Podman + +- [Podman](https://podman.io/getting-started/installation.html) + +## Usage + +Start a cluster using the podman driver: + +```shell +minikube start --driver=podman +``` +To make podman the default driver: + +```shell +minikube config set driver podman +``` diff --git a/site/content/en/docs/Reference/Drivers/includes/virtualbox_usage.inc b/site/content/en/docs/drivers/includes/virtualbox_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/virtualbox_usage.inc rename to site/content/en/docs/drivers/includes/virtualbox_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/vmware_macos_usage.inc b/site/content/en/docs/drivers/includes/vmware_macos_usage.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/vmware_macos_usage.inc rename to site/content/en/docs/drivers/includes/vmware_macos_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/kvm2.md b/site/content/en/docs/drivers/kvm2.md similarity index 88% rename from site/content/en/docs/Reference/Drivers/kvm2.md rename to site/content/en/docs/drivers/kvm2.md index df13a3f95d..b447745524 100644 --- a/site/content/en/docs/Reference/Drivers/kvm2.md +++ b/site/content/en/docs/drivers/kvm2.md @@ -1,18 +1,22 @@ --- title: "kvm2" -linkTitle: "kvm2" weight: 2 -date: 2017-01-05 -date: 2018-08-05 description: > Linux KVM (Kernel-based Virtual Machine) driver +aliases: + - /docs/reference/drivers/kvm2 --- + ## Overview [KVM (Kernel-based Virtual Machine)](https://www.linux-kvm.org/page/Main_Page) is a full virtualization solution for Linux on x86 hardware containing virtualization extensions. To work with KVM, minikube uses the [libvirt virtualization API](https://libvirt.org/) -{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/kvm2_usage.inc" %}} + +## Check virtualization support + +{{% readfile file="/docs/drivers/includes/check_virtualization_linux.inc" %}} ## Special features diff --git a/site/content/en/docs/Reference/Drivers/none.md b/site/content/en/docs/drivers/none.md similarity index 81% rename from site/content/en/docs/Reference/Drivers/none.md rename to site/content/en/docs/drivers/none.md index 2040128bdf..363c950db5 100644 --- a/site/content/en/docs/Reference/Drivers/none.md +++ b/site/content/en/docs/drivers/none.md @@ -1,18 +1,22 @@ --- title: "none" -linkTitle: "none" weight: 3 -date: 2017-01-05 -date: 2018-08-05 description: > Linux none (bare-metal) driver +aliases: + - /docs/reference/drivers/none --- ## Overview -This document is written for system integrators who are familiar with minikube, and wish to run it within a customized VM environment. The `none` driver allows advanced minikube users to skip VM creation, allowing minikube to be run on a user-supplied VM. +{{% pageinfo %}} +Most users of this driver should consider the newer [Docker driver]({{< ref "docker.md" >}}), as it is +significantly easier to configure and does not require root access. The 'none' driver is recommended for advanced users only. +{{% /pageinfo %}} -{{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} +This document is written for system integrators who wish to run minikube within a customized VM environment. The `none` driver allows advanced minikube users to skip VM creation, allowing minikube to be run on a user-supplied VM.
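A minimal sketch of what that looks like in practice, assuming minikube, kubectl, and a container runtime such as Docker are already installed on the user-supplied machine (the usage include that follows covers the full prerequisites):

```shell
# Sketch only: the none driver runs Kubernetes directly on this machine,
# so it requires root privileges and an existing container runtime on the host.
sudo minikube start --driver=none
```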
+ +{{% readfile file="/docs/drivers/includes/none_usage.inc" %}} ## Issues diff --git a/site/content/en/docs/Reference/Drivers/parallels.md b/site/content/en/docs/drivers/parallels.md similarity index 71% rename from site/content/en/docs/Reference/Drivers/parallels.md rename to site/content/en/docs/drivers/parallels.md index 8bcfbb0222..6498a913a1 100644 --- a/site/content/en/docs/Reference/Drivers/parallels.md +++ b/site/content/en/docs/drivers/parallels.md @@ -1,17 +1,15 @@ --- title: "parallels" -linkTitle: "parallels" weight: 4 -date: 2018-08-08 -description: > - Parallels driver +aliases: + - /docs/reference/drivers/parallels --- ## Overview The Parallels driver is particularly useful for users who own Parallels Desktop, as it does not require VT-x hardware support. -{{% readfile file="/docs/Reference/Drivers/includes/parallels_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/parallels_usage.inc" %}} ## Issues diff --git a/site/content/en/docs/drivers/podman.md b/site/content/en/docs/drivers/podman.md new file mode 100644 index 0000000000..93854fa899 --- /dev/null +++ b/site/content/en/docs/drivers/podman.md @@ -0,0 +1,22 @@ +--- +title: "podman" +weight: 3 +aliases: + - /docs/reference/drivers/podman +--- + +## Overview + +{{% pageinfo %}} +This driver is experimental and in active development. Help wanted! +{{% /pageinfo %}} + +The podman driver is another kubernetes in container driver for minikube. similar to [docker](https://minikube.sigs.k8s.io/Drivers/docker/) driver. The podman driver is experimental, and only supported on Linux and macOS (with a remote podman server) + +## Try it with CRI-O container runtime. + +```shell +minikube start --driver=podman --container-runtime=cri-o +``` + +{{% readfile file="/docs/drivers/includes/podman_usage.inc" %}} diff --git a/site/content/en/docs/Reference/Drivers/virtualbox.md b/site/content/en/docs/drivers/virtualbox.md similarity index 64% rename from site/content/en/docs/Reference/Drivers/virtualbox.md rename to site/content/en/docs/drivers/virtualbox.md index f979ad499e..b9158788ad 100644 --- a/site/content/en/docs/Reference/Drivers/virtualbox.md +++ b/site/content/en/docs/drivers/virtualbox.md @@ -1,17 +1,15 @@ --- title: "virtualbox" -linkTitle: "virtualbox" weight: 5 -date: 2018-08-08 -description: > - VirtualBox driver +aliases: + - /docs/reference/drivers/virtualbox --- ## Overview -VirtualBox is the oldest and most stable VM driver for minikube. +VirtualBox is minikube's original driver. It may not provide the fastest start-up time, but it is the most stable driver available for users of Microsoft Windows Home. -{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/virtualbox_usage.inc" %}} ## Special features diff --git a/site/content/en/docs/Reference/Drivers/vmware.md b/site/content/en/docs/drivers/vmware.md similarity index 78% rename from site/content/en/docs/Reference/Drivers/vmware.md rename to site/content/en/docs/drivers/vmware.md index cd355144aa..89403bd4b3 100644 --- a/site/content/en/docs/Reference/Drivers/vmware.md +++ b/site/content/en/docs/drivers/vmware.md @@ -1,10 +1,8 @@ --- title: "vmware" -linkTitle: "vmware" weight: 6 -date: 2018-08-08 -description: > - VMware driver +aliases: + - /docs/reference/drivers/vmware --- ## Overview @@ -13,7 +11,7 @@ The vmware driver supports virtualization across all VMware based hypervisors. 
{{% tabs %}} {{% tab "macOS" %}} -{{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}} +{{% readfile file="/docs/drivers/includes/vmware_macos_usage.inc" %}} {{% /tab %}} {{% tab "Linux" %}} No documentation is available yet. diff --git a/site/content/en/docs/faq/_index.md b/site/content/en/docs/faq/_index.md new file mode 100644 index 0000000000..321bff9bae --- /dev/null +++ b/site/content/en/docs/faq/_index.md @@ -0,0 +1,19 @@ +--- +title: "FAQ" +linkTitle: "FAQ" +weight: 3 +description: > + Questions that come up regularly +--- + +## Operating-systems + +## Linux + +### Preventing password prompts + +The easiest approach is to use the `docker` driver, as the backend service always runs as `root`. + +`none` users may want to try `CHANGE_MINIKUBE_NONE_USER=true`, where kubectl and such will still work: [see environment variables](https://minikube.sigs.k8s.io/reference/environment_variables/) + +Alternatively, configure `sudo` to never prompt for the commands issued by minikube. diff --git a/site/content/en/featured-background.jpg b/site/content/en/docs/featured-background.jpg similarity index 100% rename from site/content/en/featured-background.jpg rename to site/content/en/docs/featured-background.jpg diff --git a/site/content/en/docs/handbook/_index.md b/site/content/en/docs/handbook/_index.md new file mode 100644 index 0000000000..d73d6c2a8c --- /dev/null +++ b/site/content/en/docs/handbook/_index.md @@ -0,0 +1,8 @@ +--- +title: "Handbook" +weight: 2 +description: > + How to perform common tasks with minikube +aliases: + - /docs/start/examples/ +--- diff --git a/site/content/en/docs/Tasks/loadbalancer.md b/site/content/en/docs/handbook/accessing.md similarity index 64% rename from site/content/en/docs/Tasks/loadbalancer.md rename to site/content/en/docs/handbook/accessing.md index 4ec2f0f06d..3b79ecfd2b 100644 --- a/site/content/en/docs/Tasks/loadbalancer.md +++ b/site/content/en/docs/handbook/accessing.md @@ -1,13 +1,48 @@ --- -title: "LoadBalancer access" -linkTitle: "LoadBalancer access" -weight: 6 -date: 2018-08-02 +title: "Accessing apps" +weight: 3 description: > - How to access a LoadBalancer service in minikube + How to access applications running within minikube +aliases: + - /docs/tasks/loadbalancer + - /docs/tasks/nodeport --- -## Overview +There are two major categories of services in Kubernetes: + +* NodePort +* LoadBalancer + +minikube supports either. Read on! + +## NodePort access + +A NodePort service is the most basic way to get external traffic directly to your service. NodePort, as the name implies, opens a specific port, and any traffic that is sent to this port is forwarded to the service. + +### Getting the NodePort using the service command + +We also have a shortcut for fetching the minikube IP and a service's `NodePort`: + +`minikube service --url $SERVICE` + +## Getting the NodePort using kubectl + +The minikube VM is exposed to the host system via a host-only IP address, that can be obtained with the `minikube ip` command. Any services of type `NodePort` can be accessed over that IP address, on the NodePort. + +To determine the NodePort for your service, you can use a `kubectl` command like this (note that `nodePort` begins with lowercase `n` in JSON output): + +`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'` + +### Increasing the NodePort range + +By default, minikube only exposes ports 30000-32767. 
If this does not work for you, you can adjust the range by using: + +`minikube start --extra-config=apiserver.service-node-port-range=1-65535` + +This flag also accepts a comma separated list of ports and port ranges. + +---- +## LoadBalancer access A LoadBalancer service is the standard way to expose a service to the internet. With this method, each service gets its own IP address. diff --git a/site/content/en/docs/handbook/config.md b/site/content/en/docs/handbook/config.md new file mode 100644 index 0000000000..9479c3c691 --- /dev/null +++ b/site/content/en/docs/handbook/config.md @@ -0,0 +1,131 @@ +--- +title: "Configuration" +weight: 4 +description: > + Configuring your cluster +--- + +## Basic Configuration + +Most minikube configuration is done via the flags interface. To see which flags are possible for the start command, run: + +```shell +minikube start --help +``` + +## Persistent Configuration + +minikube allows users to persistently store new default values to be used across all profiles, using the `minikube config` command. This is done providing a property name, and a property value. + +For example, to persistently configure minikube to use hyperkit: + +```shell +minikube config set driver hyperkit +``` + +You can get a complete list of configurable fields using: + +```shell +minikube config --help +``` + +To get a list of the currently set config properties: + +```shell +minikube config view +``` + +## Kubernetes configuration + +minikube allows users to configure the Kubernetes components with arbitrary values. To use this feature, you can use the `--extra-config` flag on the `minikube start` command. + +This flag is repeated, so you can pass it several times with several different values to set multiple options. + +### Selecting a Kubernetes version + +By default, minikube installs the latest stable version of Kubernetes that was available at the time of the minikube release. You may select a different Kubernetes release by using the `--kubernetes-version` flag, for example: + +`minikube start --kubernetes-version=v1.11.10` + +minikube follows the [Kubernetes Version and Version Skew Support Policy](https://kubernetes.io/docs/setup/version-skew-policy/), so we guarantee support for the latest build for the last 3 minor Kubernetes releases. When practical, minikube aims to support older releases as well so that users can emulate legacy environments. + +For up to date information on supported versions, see `OldestKubernetesVersion` and `NewestKubernetesVersion` in [constants.go](https://github.com/kubernetes/minikube/blob/master/pkg/minikube/constants/constants.go) + +### Enabling feature gates + +Kubernetes alpha/experimental features can be enabled or disabled by the `--feature-gates` flag on the `minikube start` command. It takes a string of the form `key=value` where key is the `component` name and value is the `status` of it. + +```shell +minikube start --feature-gates=EphemeralContainers=true +``` + +### Modifying Kubernetes defaults + +The kubeadm bootstrapper can be configured by the `--extra-config` flag on the `minikube start` command. It takes a string of the form `component.key=value` where `component` is one of the strings + +* kubeadm +* kubelet +* apiserver +* controller-manager +* scheduler + +and `key=value` is a flag=value pair for the component being configured. 
For example, + +```shell +minikube start --extra-config=apiserver.v=10 --extra-config=kubelet.max-pods=100 +``` + +For instance, to allow Kubernetes to launch on an unsupported Docker release: + +```shell +minikube start --extra-config=kubeadm.ignore-preflight-errors=SystemVerification +``` + +## Runtime configuration + +The default container runtime in minikube is Docker. You can select it explicitly by using: + +```shell +minikube start --container-runtime=docker +``` + +You can also select: + +* *[containerd](https://github.com/containerd/containerd): +* `cri-o`: [CRI-O](https://github.com/kubernetes-sigs/cri-o): + +## Environment variables + +minikube supports passing environment variables instead of flags for every value listed in `minikube config`. This is done by passing an environment variable with the prefix `MINIKUBE_`. + +For example the `minikube start --iso-url="$ISO_URL"` flag can also be set by setting the `MINIKUBE_ISO_URL="$ISO_URL"` environment variable. + +### Exclusive environment tunings + +Some features can only be accessed by minikube specific environment variables, here is a list of these features: + +* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration. *Please note: this is used only by minikube and does not affect anything related to Kubernetes tools such as kubectl.* + +* **MINIKUBE_IN_STYLE** - (bool) manually sets whether or not emoji and colors should appear in minikube. Set to false or 0 to disable this feature, true or 1 to force it to be turned on. + +* **MINIKUBE_WANTUPDATENOTIFICATION** - (bool) sets whether the user wants an update notification for new minikube versions + +* **MINIKUBE_REMINDERWAITPERIODINHOURS** - (int) sets the number of hours to check for an update notification + +* **CHANGE_MINIKUBE_NONE_USER** - (bool) automatically change ownership of ~/.minikube to the value of $SUDO_USER + +* **MINIKUBE_ENABLE_PROFILING** - (int, `1` enables it) enables trace profiling to be generated for minikube + +### Example: Disabling emoji + +```shell +export MINIKUBE_IN_STYLE=false +minikube start +``` + +### Making environment values persistent + +To make the exported variables persistent across reboots: + +* Linux and macOS: Add these declarations to `~/.bashrc` or wherever your shells environment variables are stored. +* Windows: Add these declarations via [system settings](https://support.microsoft.com/en-au/help/310519/how-to-manage-environment-variables-in-windows-xp) or using [setx](https://stackoverflow.com/questions/5898131/set-a-persistent-environment-variable-from-cmd-exe) diff --git a/site/content/en/docs/Examples/_index.md b/site/content/en/docs/handbook/controls.md similarity index 87% rename from site/content/en/docs/Examples/_index.md rename to site/content/en/docs/handbook/controls.md index c9e665ae05..18866c8500 100755 --- a/site/content/en/docs/Examples/_index.md +++ b/site/content/en/docs/handbook/controls.md @@ -1,11 +1,11 @@ --- -title: "Examples" -linkTitle: "Examples" -weight: 3 -date: 2017-01-05 +title: "Basic controls" +weight: 2 description: > See minikube in action! 
+aliases: + - /docs/examples/ --- Start a cluster by running: @@ -28,6 +28,10 @@ minikube makes it easy to open this exposed endpoint in your browser: `minikube service hello-minikube` +Upgrade your cluster: + +`minikube start --kubernetes-version=latest` + Start a second local cluster (_note: This will not work if minikube is using the bare-metal/none driver_): `minikube start -p cluster2` diff --git a/site/content/en/docs/Tasks/dashboard.md b/site/content/en/docs/handbook/dashboard.md similarity index 90% rename from site/content/en/docs/Tasks/dashboard.md rename to site/content/en/docs/handbook/dashboard.md index 3fec853830..b0dd904408 100644 --- a/site/content/en/docs/Tasks/dashboard.md +++ b/site/content/en/docs/handbook/dashboard.md @@ -1,9 +1,10 @@ --- title: "Dashboard" -date: 2019-07-31 -weight: 1 +weight: 4 description: > - Using the Kubernetes Dashboard + Dashboard +aliases: + - /docs/tasks/dashboard/ --- minikube has integrated support for the [Kubernetes Dashboard UI](https://github.com/kubernetes/dashboard). @@ -46,4 +47,4 @@ minikube dashboard --url ## Reference -For additional information, see [the official Dashboard documentation](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/). +For additional information, see [the official Dashboard documentation](https://kubernetes.io/docs/Handbook/access-application-cluster/web-ui-dashboard/). diff --git a/site/content/en/docs/handbook/deploying.md b/site/content/en/docs/handbook/deploying.md new file mode 100644 index 0000000000..209d12b1a9 --- /dev/null +++ b/site/content/en/docs/handbook/deploying.md @@ -0,0 +1,48 @@ +--- +title: "Deploying apps" +weight: 2 +description: > + How to deploy an application to minikube +aliases: + - /docs/tasks/addons +--- + +## kubectl + +``` +kubectl create deployment hello-minikube1 --image=k8s.gcr.io/echoserver:1.4 +kubectl expose deployment hello-minikube1 --type=LoadBalancer --port=8080 +``` + +## Addons + +minikube has a built-in list of applications and services that may be easily deployed, such as Istio or Ingress. 
To list the available addons for your version of minikube: + + +```shell +minikube addons list +``` + +To enable an add-on, see: +```shell +minikube addons enable +``` + +To enable an addon at start-up: + +```shell +minikube start --addons +``` + +For addons that expose a browser endpoint, you can quickly open them with: + +```shell +minikube addons open +``` + +To disable an addon: + + +```shell +minikube addons disable +``` diff --git a/site/content/en/docs/Tasks/sync.md b/site/content/en/docs/handbook/filesync.md similarity index 96% rename from site/content/en/docs/Tasks/sync.md rename to site/content/en/docs/handbook/filesync.md index fb39f5dc87..bfa1551349 100644 --- a/site/content/en/docs/Tasks/sync.md +++ b/site/content/en/docs/handbook/filesync.md @@ -1,10 +1,10 @@ --- title: "File Sync" -linkTitle: "File Sync" -weight: 6 -date: 2019-08-01 +weight: 12 description: > How to sync files into minikube +aliases: + - /docs/tasks/sync/ --- ## Built-in sync diff --git a/site/content/en/docs/Tasks/accessing-host-resources.md b/site/content/en/docs/handbook/host-access.md similarity index 92% rename from site/content/en/docs/Tasks/accessing-host-resources.md rename to site/content/en/docs/handbook/host-access.md index 421eb0afb5..da419d0c94 100644 --- a/site/content/en/docs/Tasks/accessing-host-resources.md +++ b/site/content/en/docs/handbook/host-access.md @@ -1,9 +1,11 @@ --- -title: "Accessing host resources" +title: "Host access" date: 2017-01-05 -weight: 2 +weight: 9 description: > How to access host resources from a pod +aliases: + - docs/tasks/accessing-host-resources/ --- {{% pageinfo %}} diff --git a/site/content/en/docs/Tasks/mount.md b/site/content/en/docs/handbook/mount.md similarity index 94% rename from site/content/en/docs/Tasks/mount.md rename to site/content/en/docs/handbook/mount.md index d7b19f05c4..40033f5bcd 100644 --- a/site/content/en/docs/Tasks/mount.md +++ b/site/content/en/docs/handbook/mount.md @@ -1,9 +1,11 @@ --- -title: "Filesystem mounts" +title: "Mounting filesystems" date: 2017-01-05 -weight: 4 +weight: 11 description: > How to mount a host directory into the VM +aliases: + - /docs/tasks/mount --- ## 9P Mounts @@ -78,4 +80,4 @@ These mounts can be disabled by passing `--disable-driver-mounts` to `minikube s ## File Sync -See [File Sync]({{}}) +See [File Sync]({{}}) diff --git a/site/content/en/docs/Reference/disk_cache.md b/site/content/en/docs/handbook/offline.md similarity index 89% rename from site/content/en/docs/Reference/disk_cache.md rename to site/content/en/docs/handbook/offline.md index 84d43112ef..d9238520fb 100644 --- a/site/content/en/docs/Reference/disk_cache.md +++ b/site/content/en/docs/handbook/offline.md @@ -1,10 +1,10 @@ --- -title: "Disk cache" -linkTitle: "Disk cache" -weight: 6 +title: "Offline usage" +linkTitle: "Offline usage" +weight: 8 date: 2019-08-01 description: > - Cache Rules Everything Around Minikube + Cache Rules Everything Around minikube --- minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations: @@ -16,12 +16,10 @@ minikube has built-in support for caching downloaded resources into `$MINIKUBE_H ## Kubernetes image cache +NOTE: the `none` driver caches images directly into Docker rather than a separate disk cache. + `minikube start` caches all required Kubernetes images by default. This default may be changed by setting `--cache-images=false`. These images are not displayed by the `minikube cache` command. 
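As a quick illustration of the flag mentioned above (a sketch, not part of the original page):

```shell
# Start a cluster without pre-caching the Kubernetes images
minikube start --cache-images=false
```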
-## Arbitrary docker image cache - -See [Tasks: Caching images]({{< ref "/docs/tasks/caching.md" >}}) - ## Sharing the minikube cache For offline use on other hosts, one can copy the contents of `~/.minikube/cache`. As of the v1.0 release, this directory contains 685MB of data: diff --git a/site/content/en/docs/Reference/persistent_volumes.md b/site/content/en/docs/handbook/persistent_volumes.md similarity index 96% rename from site/content/en/docs/Reference/persistent_volumes.md rename to site/content/en/docs/handbook/persistent_volumes.md index 6b7a38b83b..0da5465b7b 100644 --- a/site/content/en/docs/Reference/persistent_volumes.md +++ b/site/content/en/docs/handbook/persistent_volumes.md @@ -1,13 +1,13 @@ --- title: "Persistent Volumes" linkTitle: "Persistent Volumes" -weight: 6 +weight: 10 date: 2019-08-01 description: > About persistent volumes (hostPath) --- -minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. +minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. ## A note on mounts, persistence, and minikube hosts diff --git a/site/content/en/docs/handbook/pushing.md b/site/content/en/docs/handbook/pushing.md new file mode 100644 index 0000000000..32c58edf2f --- /dev/null +++ b/site/content/en/docs/handbook/pushing.md @@ -0,0 +1,195 @@ +--- +title: "Pushing images" +weight: 5 +description: > + comparing 5 ways to push your image into a minikiube cluster. +aliases: + - /docs/tasks/building + - /docs/tasks/caching + - /docs/tasks/podman_service + - /docs/tasks/docker_daemon +--- + +## Comparison table for different methods +The best method to push your image to minikube depends on the container-runtime you built your cluster with (default docker). +Here is a comparison table to help you choose: + + +| Method | Supported Runtimes | Issues | Performance | +|--- |--- |--- |--- |--- | +| [docker-env command](/docs/handbook/pushing/#1pushing-directly-to-the-in-cluster-docker-daemon-docker-env) | only docker | | good | +| [podman-env command](/docs/handbook/pushing/#3-pushing-directly-to-in-cluster-crio-podman-env) | only cri-o | | good | +| [cache add command](/pushing/#2-push-images-using-cache-command) | all | | ok | +| [registry addon](/docs/handbook/pushing/#4-pushing-to-an-in-cluster-using-registry-addon) | all | work in progress for [docker on mac](https://github.com/kubernetes/minikube/issues/7535) | ok | +| [minikube ssh](/docs/handbook/pushing/#5-building-images-inside-of-minikube-using-ssh) | all | | best | + + +* note1 : the default container-runtime on minikube is 'docker'. +* note2 : 'none' driver (bare metal) does not need pushing image to the cluster, as any image on your system is already available to the kuberentes. 
+ +--- + +## 1.Pushing directly to the in-cluster Docker daemon (docker-env) +When using a container or VM driver (all drivers except none), you can reuse the Docker daemon inside the minikube cluster. +This means you don't have to build on your host machine and push the image into a Docker registry; you can build inside the same Docker daemon as minikube, which speeds up local experiments. + +To point your terminal at the Docker daemon inside minikube, run: + +```shell +eval $(minikube docker-env) +``` + +Now any `docker` command you run in this terminal will run against the Docker daemon inside the minikube cluster. + +For example, the following command shows the containers running inside minikube's VM or container: + +```shell +docker ps +``` + +You can now build against the Docker daemon inside minikube, and the resulting image is instantly accessible to the Kubernetes cluster: + +```shell +docker build -t my_image . +``` + +To verify that your terminal is using minikube's docker-env, check that the environment variable MINIKUBE_ACTIVE_DOCKERD is set to the cluster name. + +{{% pageinfo color="info" %}} +Tip 1: +Remember to turn off `imagePullPolicy:Always` (use `imagePullPolicy:IfNotPresent` or `imagePullPolicy:Never`) in your yaml file; otherwise Kubernetes won't use your locally built image and will pull it from the network instead. +{{% /pageinfo %}} + +{{% pageinfo color="info" %}} +Tip 2: +Evaluating the docker-env is only valid for the current terminal. +Once you close the terminal, you will go back to using your own system's docker daemon. +{{% /pageinfo %}} + +{{% pageinfo color="info" %}} +Tip 3: +With container-based drivers such as Docker or Podman, you will need to re-run docker-env each time you restart your minikube cluster. +{{% /pageinfo %}} + + +For more information, see [docker-env](https://minikube.sigs.k8s.io/docs/commands/docker-env/) + +--- + +## 2. Push images using 'cache' command. + +From your host, you can push a Docker image directly to minikube. This image will be cached and automatically pulled into all future minikube clusters created on the machine. + +```shell +minikube cache add alpine:latest +``` + +The add command will store the requested image in `$MINIKUBE_HOME/cache/images`, and load it into the minikube cluster's container runtime environment automatically. + +{{% pageinfo color="info" %}} +Tip 1: +If your image changes after you cached it, you need to run 'cache reload'. +{{% /pageinfo %}} + + +minikube refreshes the cached images on each start. However, to reload all cached images on demand, run: +```shell +minikube cache reload +``` + +{{% pageinfo color="info" %}} +Tip 2: +If you have multiple clusters, the cache command will load the image into all of them. +{{% /pageinfo %}} + + +To display images you have added to the cache: + +```shell +minikube cache list +``` + +This listing will not include minikube's built-in system images. + +To delete an image from the cache: + +```shell +minikube cache delete +``` + +For more information, see: + +* [Reference: cache command]({{< ref "/docs/commands/cache.md" >}}) + +--- + +## 3. Pushing directly to in-cluster CRIO (podman-env) + +This is similar to docker-env, but only for the cri-o runtime.
+To push directly to CRIO, configure podman client on your mac/linux host using the podman-env command in your shell: + +```shell +eval $(minikube podman-env) +``` + +You should now be able to use podman on the command line on your host mac/linux machine talking to the podman service inside the minikube VM: + +```shell +podman-remote help +``` + +Remember to turn off the `imagePullPolicy:Always` (use `imagePullPolicy:IfNotPresent` or `imagePullPolicy:Never`), as otherwise Kubernetes won't use images you built locally. + +--- + +## 4. Pushing to an in-cluster using Registry addon + +For illustration purpose, we will assume that minikube VM has one of the ip from `192.168.39.0/24` subnet. If you have not overridden these subnets as per [networking guide](https://minikube.sigs.k8s.io/reference/networking/), you can find out default subnet being used by minikube for a specific OS and driver combination [here](https://github.com/kubernetes/minikube/blob/dfd9b6b83d0ca2eeab55588a16032688bc26c348/pkg/minikube/cluster/cluster.go#L408) which is subject to change. Replace `192.168.39.0/24` with appropriate values for your environment wherever applicable. + +Ensure that docker is configured to use `192.168.39.0/24` as insecure registry. Refer [here](https://docs.docker.com/registry/insecure/) for instructions. + +Ensure that `192.168.39.0/24` is enabled as insecure registry in minikube. Refer [here](https://minikube.sigs.k8s.io/Handbook/registry/insecure/) for instructions.. + +Enable minikube registry addon: + +```shell +minikube addons enable registry +``` + +Build docker image and tag it appropriately: + +```shell +docker build --tag $(minikube ip):5000/test-img . +``` + +Push docker image to minikube registry: + +```shell +docker push $(minikube ip):5000/test-img +``` + +--- + +## 5. Building images inside of minikube using SSH + +Use `minikube ssh` to run commands inside the minikube node, and run the `docker build` directly there. +Any command you run there will run against the same daemon that kubernetes cluster is using. + +```shell +docker build +``` + +For more information on the `docker build` command, read the [Docker documentation](https://docs.docker.com/engine/reference/commandline/build/) (docker.com). + +For Podman, use: + +```shell +sudo -E podman build +``` + +For more information on the `podman build` command, read the [Podman documentation](https://github.com/containers/libpod/blob/master/docs/source/markdown/podman-build.1.md) (podman.io). 
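The build context has to be reachable from inside the node. One hypothetical way to arrange that, assuming a Dockerfile in the current host directory and an arbitrary `test-img` tag, is to mount the directory into the VM first:

```shell
# Sketch: expose the current directory to the node, then build from it
minikube mount "$(pwd)":/data &                  # the 9P mount keeps running in the background
minikube ssh "docker build -t test-img /data"    # build against the node's Docker daemon
```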
+ +to exit minikube ssh and come back to your terminal type: +```shell +exit +``` diff --git a/site/content/en/docs/Tasks/Registry/insecure.md b/site/content/en/docs/handbook/registry.md similarity index 65% rename from site/content/en/docs/Tasks/Registry/insecure.md rename to site/content/en/docs/handbook/registry.md index e3f1116014..5d6d8d8385 100644 --- a/site/content/en/docs/Tasks/Registry/insecure.md +++ b/site/content/en/docs/handbook/registry.md @@ -1,12 +1,40 @@ --- -title: "Insecure" -linkTitle: "Insecure" +title: "Registries" +linkTitle: "Registries" weight: 6 -date: 2019-08-1 description: > - How to enable insecure registry support within minikube + How to interact with registries +aliases: + - /docs/tasks/registry + - /docs/tasks/docker_registry + - /docs/tasks/registry/private + - /docs/tasks/registry/insecure --- +## Using a Private Registry + +**GCR/ECR/ACR/Docker**: minikube has an addon, `registry-creds` which maps credentials into minikube to support pulling from Google Container Registry (GCR), Amazon's EC2 Container Registry (ECR), Azure Container Registry (ACR), and Private Docker registries. You will need to run `minikube addons configure registry-creds` and `minikube addons enable registry-creds` to get up and running. An example of this is below: + +```shell +$ minikube addons configure registry-creds +Do you want to enable AWS Elastic Container Registry? [y/n]: n + +Do you want to enable Google Container Registry? [y/n]: y +-- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):/home/user/.config/gcloud/application_default_credentials.json + +Do you want to enable Docker Registry? [y/n]: n + +Do you want to enable Azure Container Registry? [y/n]: n +registry-creds was successfully configured +$ minikube addons enable registry-creds +``` + +For additional information on private container registries, see [this page](https://kubernetes.io/docs/Handbook/configure-pod-container/pull-image-private-registry/). + +We recommend you use _ImagePullSecrets_, but if you would like to configure access on the minikube VM you can place the `.dockercfg` in the `/home/docker` directory or the `config.json` in the `/var/lib/kubelet` directory. Make sure to restart your kubelet (for kubeadm) process with `sudo systemctl restart kubelet`. + +## Enabling Insecure Registries + minikube allows users to configure the docker engine's `--insecure-registry` flag. You can use the `--insecure-registry` flag on the @@ -78,3 +106,5 @@ docker push localhost:5000/myimage ``` After the image is pushed, refer to it by `localhost:5000/{name}` in kubectl specs. 
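For example, a workload can then be started from that image (a sketch; `hello-registry` is an arbitrary deployment name):

```shell
# Run the image that was pushed to the insecure registry above
kubectl create deployment hello-registry --image=localhost:5000/myimage
```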
+ +## \ No newline at end of file diff --git a/site/content/en/docs/Tasks/debug.md b/site/content/en/docs/handbook/troubleshooting.md similarity index 94% rename from site/content/en/docs/Tasks/debug.md rename to site/content/en/docs/handbook/troubleshooting.md index bab4de45c7..0f8e4a1432 100644 --- a/site/content/en/docs/Tasks/debug.md +++ b/site/content/en/docs/handbook/troubleshooting.md @@ -1,10 +1,8 @@ --- -title: "Debugging" -linkTitle: "Debugging" -weight: 9 -date: 2019-08-01 +title: "Troubleshooting" +weight: 20 description: > - How to debug issues within minikube + How to troubleshoot minikube issues --- ## Enabling debug logs diff --git a/site/content/en/docs/handbook/uninstalling_minikube.md b/site/content/en/docs/handbook/uninstalling_minikube.md new file mode 100644 index 0000000000..b01809f513 --- /dev/null +++ b/site/content/en/docs/handbook/uninstalling_minikube.md @@ -0,0 +1,45 @@ +--- +title: "Uninstall" +linkTitle: "Uninstall" +weight: 99 +draft: true +date: 2019-08-18 +description: > + Reference on uninstalling minikube +--- + +NOTE: To be moved to the FAQ + +## Chocolatey + +- Open a command prompt with Administrator privileges. +- Run `minikube delete --purge --all` +- Run, `choco uninstall minikube` to remove the minikube package from your system. + +## Windows Installer + +- Open a command prompt with Administrator privileges. +- Run `minikube delete --purge --all` +- Open the Run dialog box (**Win+R**), type in `appwiz.cpl` and hit **Enter** key. +- In there, find an entry for the Minikube installer, right click on it & click on **Uninstall**. + +## Binary/Direct + +- Open a command prompt with Administrator privileges. +- Run `minikube delete --purge --all` +- Delete the minikube binary. + +## Debian/Ubuntu (Deb) + +- Run `minikube delete --purge --all` +- Run `sudo dpkg -P minikube` + +## Fedora/Red Hat (RPM) + +- Run `minikube delete --purge --all` +- Run `sudo rpm -e minikube` + +## Brew + +- Run `minikube delete --purge --all` +- Run `brew uninstall minikube` diff --git a/site/content/en/docs/handbook/untrusted_certs.md b/site/content/en/docs/handbook/untrusted_certs.md new file mode 100644 index 0000000000..049f9075d7 --- /dev/null +++ b/site/content/en/docs/handbook/untrusted_certs.md @@ -0,0 +1,35 @@ +--- +title: "Certificates" +weight: 7 +date: 2019-08-15 +description: > + All about TLS certificates +--- + +## Untrusted Root Certificates + +Many organizations deploy their own Root Certificate and CA service inside the corporate networks. +Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns. + +You may install the Root Certificate into the minikube cluster to access these corporate resources within the cluster. + +### Tutorial + +You will need a corporate X.509 Root Certificate in PEM format. 
If it's in DER format, convert it: + +``` +openssl x509 -inform der -in my_company.cer -out my_company.pem +``` + +Copy the certificate into the certs directory: + +```shell +mkdir -p $HOME/.minikube/certs +cp my_company.pem $HOME/.minikube/certs/my_company.pem +``` + +Then restart minikube to sync the certificates: + +```shell +minikube start +``` diff --git a/site/content/en/docs/Reference/Networking/proxy.md b/site/content/en/docs/handbook/vpn_and_proxy.md similarity index 71% rename from site/content/en/docs/Reference/Networking/proxy.md rename to site/content/en/docs/handbook/vpn_and_proxy.md index e82fece322..08c8e1c5ff 100644 --- a/site/content/en/docs/Reference/Networking/proxy.md +++ b/site/content/en/docs/handbook/vpn_and_proxy.md @@ -1,13 +1,15 @@ --- -title: "HTTP Proxies" -linkTitle: "HTTP Proxies" +title: "Proxies & VPNs" weight: 6 -date: 2017-01-05 description: > - How to use an HTTP/HTTPS proxy with minikube + How to use minikube with a VPN or HTTP/HTTPS proxy --- -minikube requires access to the internet via HTTP, HTTPS, and DNS protocols. If a HTTP proxy is required to access the internet, you may need to pass the proxy connection information to both minikube and Docker using environment variables: +minikube requires access to the internet via HTTP, HTTPS, and DNS protocols. + +## Proxy + +If an HTTP proxy is required to access the internet, you may need to pass the proxy connection information to both minikube and Docker using environment variables: * `HTTP_PROXY` - The URL to your HTTP proxy * `HTTPS_PROXY` - The URL to your HTTPS proxy @@ -47,20 +49,9 @@ minikube start To set these environment variables permanently, consider adding these to your [system settings](https://support.microsoft.com/en-au/help/310519/how-to-manage-environment-variables-in-windows-xp) or using [setx](https://stackoverflow.com/questions/5898131/set-a-persistent-environment-variable-from-cmd-exe) -## Configuring Docker to use a proxy +### Troubleshooting -As of v1.0, minikube automatically configures the Docker instance inside of the VM to use the proxy environment variables, unless you have specified a `--docker-env` override. If you need to manually configure Docker for a set of proxies, use: - -```shell -minikube start \ - --docker-env=HTTP_PROXY=$HTTP_PROXY \ - --docker-env HTTPS_PROXY=$HTTPS_PROXY \ - --docker-env NO_PROXY=$NO_PROXY -``` - -## Troubleshooting - -### unable to cache ISO... connection refused +#### unable to cache ISO... connection refused ```text Unable to start VM: unable to cache ISO: https://storage.googleapis.com/minikube/iso/minikube.iso: @@ -72,7 +63,7 @@ proxyconnect tcp: dial tcp :: connect: connection refused This error indicates that the host:port combination defined by HTTPS_PROXY or HTTP_PROXY is incorrect, or that the proxy is unavailable. -## Unable to pull images..Client.Timeout exceeded while awaiting headers +#### Unable to pull images..Client.Timeout exceeded while awaiting headers ```text Unable to pull images, which may be OK: @@ -84,7 +75,7 @@ Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connect This error indicates that the container runtime running within the VM does not have access to the internet. Verify that you are passing the appropriate value to `--docker-env HTTPS_PROXY`.
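One way to double-check which proxy values the container runtime actually picked up (a quick sketch, assuming the Docker runtime) is to compare the daemon's view inside the minikube VM with the variables exported on the host:

```shell
# Proxy settings as seen by the Docker daemon inside minikube
minikube ssh -- docker info | grep -i proxy

# Proxy variables exported on the host
env | grep -i _proxy
```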
-## x509: certificate signed by unknown authority +#### x509: certificate signed by unknown authority ```text [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.13.3: @@ -100,10 +91,23 @@ Ask your IT department for the appropriate PEM file, and add it to: Then run `minikube delete` and `minikube start`. -## downloading binaries: proxyconnect tcp: tls: oversized record received with length 20527 +#### downloading binaries: proxyconnect tcp: tls: oversized record received with length 20527 The supplied value of `HTTPS_PROXY` is probably incorrect. Verify that this value is not pointing to an HTTP proxy rather than an HTTPS proxy. -## Additional Information +## VPN -* [Configure Docker to use a proxy server](https://docs.docker.com/network/proxy/) +minikube requires access from the host to the following IP ranges: + +* **192.168.99.0/24**: Used by the minikube VM. Configurable for some hypervisors via `--host-only-cidr` +* **192.168.39.0/24**: Used by the minikube kvm2 driver. +* **10.96.0.0/12**: Used by service cluster IPs. Configurable via `--service-cluster-ip-range` + +Unfortunately, many VPN configurations route packets to these destinations through an encrypted tunnel, rather than allowing the packets to go to the minikube VM. + +### Possible workarounds + +1. If you have access, whitelist the above IP ranges in your VPN software +2. In your VPN software, select an option similar to "Allow local (LAN) access when using VPN" [(Cisco VPN example)](https://superuser.com/questions/987150/virtualbox-guest-os-through-vpn) +3. You may have luck selecting alternate values for the `--host-only-cidr` and `--service-cluster-ip-range` flags. +4. Turn off the VPN diff --git a/site/content/en/logo.png b/site/content/en/docs/logo.png similarity index 100% rename from site/content/en/logo.png rename to site/content/en/docs/logo.png diff --git a/site/content/en/start.png b/site/content/en/docs/start.png similarity index 100% rename from site/content/en/start.png rename to site/content/en/docs/start.png diff --git a/site/content/en/docs/start/_index.md b/site/content/en/docs/start/_index.md new file mode 100644 index 0000000000..df077d6a8b --- /dev/null +++ b/site/content/en/docs/start/_index.md @@ -0,0 +1,190 @@ +--- +title: "minikube start" +linkTitle: "Get Started!" +weight: 1 +aliases: + - /docs/start +--- + +minikube is local Kubernetes, focusing on making it easy to learn and develop for Kubernetes. + +All you need is a Docker (or similarly compatible) container or a virtual machine environment, and Kubernetes is a single command away: `minikube start` + + +## What you’ll need + +* 2GB of free memory +* 20GB of free disk space +* Internet connection +* Container or virtual machine manager, such as: [Docker]({{}}), [Hyperkit]({{}}), [Hyper-V]({{}}), [KVM]({{}}), [Parallels]({{}}), [Podman]({{}}), [VirtualBox]({{}}), or [VMWare]({{}}) + +

+## Step 1: Installation

+ +{{% tabs %}} +{{% tab "Linux" %}} + +For Linux users, we provide 3 easy download options: + +### Binary download + +```shell + curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 + sudo install minikube-linux-amd64 /usr/local/bin/minikube +``` + +### Debian package + +```shell +curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_{{< latest >}}-0_amd64.deb +sudo dpkg -i minikube_{{< latest >}}-0_amd64.deb +``` + +### RPM package + +```shell +curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< latest >}}-0.x86_64.rpm +sudo rpm -ivh minikube-{{< latest >}}-0.x86_64.rpm +``` + +{{% /tab %}} +{{% tab "macOS" %}} + +If the [Brew Package Manager](https://brew.sh/) is installed: + +```shell +brew install minikube +``` + +Otherwise, download minikube directly: + +```shell +curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 +sudo install minikube-darwin-amd64 /usr/local/bin/minikube +``` + +{{% /tab %}} +{{% tab "Windows" %}} + +If the [Chocolatey Package Manager](https://chocolatey.org/) is installed, use it to install minikube: + +```shell +choco install minikube +``` + +Otherwise, download and run the [Windows installer](https://storage.googleapis.com/minikube/releases/latest/minikube-installer.exe) + +{{% /tab %}} +{{% /tabs %}} + +
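Whichever installation method you choose, it is worth confirming that the binary is on your `PATH` before moving on:

```shell
# Print the installed minikube version
minikube version
```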

+## Step 2: Start your cluster

+ +From a terminal with administrator access (but not logged in as root), run: + +```shell +minikube start +``` + +If minikube fails to start, see the [drivers page]({{}}) for help setting up a compatible container or virtual-machine manager. + +
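If automatic driver selection picks something other than what you want, you can name a driver explicitly. A minimal sketch, assuming the Docker driver is available on your machine:

```shell
# Start the cluster with an explicit driver instead of auto-detection
minikube start --driver=docker

# Optionally make that driver the default for future "minikube start" runs
minikube config set driver docker
```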

+## Step 3: Interact with your cluster

+ +If you already have kubectl installed, you can now use it to access your shiny new cluster: + +```shell +kubectl get po -A +``` + +Alternatively, minikube can download the appropriate version of kubectl, if you don't mind the double-dashes in the command-line: + +```shell +minikube kubectl -- get po -A +``` + +minikube bundles the Kubernetes Dashboard, allowing you to get easily acclimated to your new environment: + +```shell +minikube dashboard +``` + +
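If the double-dashes get tiresome, a common convenience (a sketch for bash-like shells) is to alias `kubectl` to the bundled binary for the current session:

```shell
# Route plain "kubectl" invocations through minikube's bundled kubectl
alias kubectl="minikube kubectl --"

# The usual syntax now works against the minikube cluster
kubectl get po -A
```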

+## Step 4: Deploy applications

+ +Create a sample deployment and expose it on port 8080: + +```shell +kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4 +kubectl expose deployment hello-minikube --type=NodePort --port=8080 +``` + +Find your cluster IP: + +```shell +minikube ip +``` + +Either navigate to <your ip>:8080 in your web browser, or let minikube do it for you: + +```shell +minikube service hello-minikube +``` + +To access a LoadBalancer application, use the "minikube tunnel" feature. Here is an example deployment: + +```shell +kubectl create deployment balanced --image=k8s.gcr.io/echoserver:1.4 +kubectl expose deployment balanced --type=LoadBalancer --port=8081 +``` + +In another window, start the tunnel to create a routable IP for the deployment: + +```shell +minikube tunnel +``` + +Access the application using the "service" command, or your web browser. If you are using macOS, minikube will also forward DNS requests for you: [http://balanced.default.svc.cluster.local:8081/](http://balanced.default.svc.cluster.local:8081/) + +
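While `minikube tunnel` is running, you can confirm that the `balanced` service was assigned a routable external IP (output varies by driver and platform):

```shell
# The EXTERNAL-IP column should show an address while the tunnel is active
kubectl get services balanced
```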

+## Step 5: Manage your cluster

+ +Pause Kubernetes without impacting deployed applications: + +```shell +minikube pause +``` + +Halt the cluster: + +```shell +minikube stop +``` + +Increase the default memory limit (requires a restart): + +```shell +minikube config set memory 16384 +``` + +Browse the catalog of easily installed Kubernetes services: + +```shell +minikube addons list +``` + +Create a second cluster running an older Kubernetes release: + +```shell +minikube start -p aged --kubernetes-version=v1.16.1 +``` + +Delete all of the minikube clusters: + + +```shell +minikube delete --all +``` + +## Take the next step + +* [The minikube handbook]({{}}) +* [Community-contributed tutorials]({{}}) +* [minikube command reference]({{}}) +* [Contributors guide]({{}}) +* Take our [fast 5-question survey](https://forms.gle/Gg3hG5ZySw8c1C24A) to share your thoughts 🙏 \ No newline at end of file diff --git a/site/content/en/docs/Tutorials/_index.md b/site/content/en/docs/tutorials/_index.md similarity index 91% rename from site/content/en/docs/Tutorials/_index.md rename to site/content/en/docs/tutorials/_index.md index 60e07fef45..83db8c6e1f 100755 --- a/site/content/en/docs/Tutorials/_index.md +++ b/site/content/en/docs/tutorials/_index.md @@ -1,8 +1,6 @@ --- title: "Tutorials" -linkTitle: "Tutorials" weight: 4 -date: 2017-01-04 description: > Contributed end-to-end tutorials using minikube --- diff --git a/site/content/en/docs/tutorials/audit-policy.md b/site/content/en/docs/tutorials/audit-policy.md new file mode 100644 index 0000000000..973986f0bb --- /dev/null +++ b/site/content/en/docs/tutorials/audit-policy.md @@ -0,0 +1,39 @@ +--- +title: "Audit Policy" +linkTitle: "Audit Policy" +weight: 1 +date: 2019-11-19 +description: > + Enabling audit policy for minikube +--- + +## Overview + +[Auditing](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) is not enabled in minikube by default. +This tutorial shows how to provide an [Audit Policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) file to the minikube API server on startup. + +## Tutorial + +```shell +minikube stop + +mkdir -p ~/.minikube/files/etc/ssl/certs + +cat <<EOF > ~/.minikube/files/etc/ssl/certs/audit-policy.yaml +# Log all requests at the Metadata level. +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Metadata +EOF + +minikube start \ + --extra-config=apiserver.audit-policy-file=/etc/ssl/certs/audit-policy.yaml \ + --extra-config=apiserver.audit-log-path=- + +kubectl logs kube-apiserver-minikube -n kube-system | grep audit.k8s.io/v1 +``` + +The [Audit Policy](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) used in this tutorial is very minimal and quite verbose. As a next step you might want to fine-tune the `audit-policy.yaml` file. To get the changes applied you need to stop and start minikube. Restarting minikube triggers the [file sync mechanism](https://minikube.sigs.k8s.io/Handbook/sync/) that copies the yaml file onto the minikube node and causes the API server to read the changed policy file. + +Note: Currently there is no dedicated directory to store the `audit-policy.yaml` file in `~/.minikube/`. Using the `~/.minikube/files/etc/ssl/certs` directory is a workaround! This workaround works like this: By putting the file into a sub-directory of `~/.minikube/files/`, the [file sync mechanism](https://minikube.sigs.k8s.io/Handbook/sync/) gets triggered and copies the `audit-policy.yaml` file from the host onto the minikube node.
When the API server container gets started by `kubeadm`, it will mount the `/etc/ssl/certs` directory from the minikube node into the container. This is the reason why the `audit-policy.yaml` file has to be stored in the ssl certs directory: It's one of the directories that get mounted from the minikube node into the container. diff --git a/site/content/en/docs/tutorials/configuring_creds_for_aws_ecr.md b/site/content/en/docs/tutorials/configuring_creds_for_aws_ecr.md new file mode 100644 index 0000000000..2476394e0f --- /dev/null +++ b/site/content/en/docs/tutorials/configuring_creds_for_aws_ecr.md @@ -0,0 +1,137 @@ +--- +title: "Configure credentials for AWS Elastic Container Registry using registry-creds addon" +linkTitle: "Configure creds for AWS ECR using registry-creds" +weight: 1 +date: 2020-03-25 +description: > + How to configure credentials for AWS ECR using the registry-creds addon for a minikube cluster +--- + +## Overview + +The minikube [registry-creds addon](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry-creds) enables developers to set up credentials for pulling images from AWS ECR from inside their minikube cluster. + +The addon automagically refreshes the service account token for the `default` service account in the `default` namespace. + + +## Prerequisites + +- a working minikube cluster +- a container image in AWS ECR that you would like to use +- AWS access keys that can be used to pull the above image +- AWS account number of the account hosting the registry + + +## Configuring and enabling the registry-creds addon + + +### Configure the registry-creds addon + +Configure the minikube registry-creds addon with the following command: + +Note: In this tutorial, we will focus only on AWS ECR. + +```shell +minikube addons configure registry-creds +``` + +Follow the prompt and enter `y` for AWS ECR. Provide the requested information. It should look like this: +```shell +$ minikube addons configure registry-creds + +Do you want to enable AWS Elastic Container Registry? [y/n]: y +-- Enter AWS Access Key ID: +-- Enter AWS Secret Access Key: +-- (Optional) Enter AWS Session Token: +-- Enter AWS Region: us-west-2 +-- Enter 12 digit AWS Account ID (Comma separated list): +-- (Optional) Enter ARN of AWS role to assume: + +Do you want to enable Google Container Registry? [y/n]: n + +Do you want to enable Docker Registry? [y/n]: n + +Do you want to enable Azure Container Registry? [y/n]: n +✅ registry-creds was successfully configured + +``` + +### Enable the registry-creds addon + +Enable the minikube registry-creds addon with the following command: + +```shell +minikube addons enable registry-creds +``` + +### Create a deployment that uses an image in AWS ECR + +This tutorial will use a vanilla alpine image that has already been uploaded into a repository in AWS ECR. + +Let's use this alpine deployment that is set up to use the alpine image from ECR. Make sure you update the `image` field with a valid URI. + +`alpine-deployment.yaml` +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alpine-deployment + labels: + app: alpine +spec: + replicas: 1 + selector: + matchLabels: + app: alpine + template: + metadata: + labels: + app: alpine + spec: + containers: + - name: alpine + image: <>.dkr.ecr.<>.amazonaws.com/alpine:latest + command: ['sh', '-c', 'echo Container is Running ; sleep 3600'] +``` + +Create a file called `alpine-deployment.yaml` and paste the contents above.
Be sure to replace <> and <> with your actual account number and aws region. Then create the alpine deployment with the following command: + +```shell +kubectl apply -f alpine-deployment.yaml +``` + +### Test your deployment + +Describe the pod and verify the image pull was successful: + +```shell +kubectl describe pods << alpine-deployment-pod-name >> +``` + +You should see an event like this: + +```text +Successfully pulled image "<>.dkr.ecr.<>.amazonaws.com/alpine:latest" +``` + +If you do not see that event, look at the troubleshooting section. + + +## Review + +In the above tutorial, we configured the `registry-creds` addon to refresh the credentials for AWS ECR so that we could pull private container images onto our minikube cluster. We ultimately created a deployment that used an image in a private AWS ECR repository. + + +## Troubleshooting + +- Check if you have a secret called `awsecr-cred` in the `default` namespace by running `kubectl get secrets`. +- Check if the image path is valid. +- Check if the registry-creds addon is enabled by using `minikube addons list`. + +## Caveats + +The service account token for the `default` service account in the `default` namespace is kept updated by the addon. If you create your deployment in a different namespace, the image pull will not work. + +## Related articles + +- [registry-creds addon](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry-creds) diff --git a/site/content/en/docs/Tutorials/continuous_integration.md b/site/content/en/docs/tutorials/continuous_integration.md similarity index 52% rename from site/content/en/docs/Tutorials/continuous_integration.md rename to site/content/en/docs/tutorials/continuous_integration.md index 4dad6cc9ed..c4ced226c3 100644 --- a/site/content/en/docs/Tutorials/continuous_integration.md +++ b/site/content/en/docs/tutorials/continuous_integration.md @@ -1,41 +1,30 @@ --- title: "Continuous Integration" -linkTitle: "Continuous Integration" weight: 1 -date: 2018-01-02 description: > Using minikube for Continuous Integration --- ## Overview -Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. +Most continuous integration environments are already running inside a VM, and may not support nested virtualization. -## Prerequisites +The `docker` driver was designed for this use case, as well as the older `none` driver. 
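Once the cluster is started (as in the example below), a CI job typically waits for the node to be Ready before running tests; a minimal sketch, assuming `kubectl` is already on the PATH:

```shell
# Fail fast if the single minikube node does not become Ready within two minutes
kubectl wait --for=condition=Ready node --all --timeout=120s

# Quick smoke test that the API server is answering
kubectl get pods -A
```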
-- VM running a systemd based Linux distribution - -## Tutorial +## Example Here is an example, that runs minikube from a non-root user, and ensures that the latest stable kubectl is installed: ```shell -curl -Lo minikube \ +curl -LO \ https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && sudo install minikube /usr/local/bin/ - + && install minikube-linux-amd64 /tmp/ + kv=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) -curl -Lo kubectl \ +curl -LO \ https://storage.googleapis.com/kubernetes-release/release/$kv/bin/linux/amd64/kubectl \ - && sudo install kubectl /usr/local/bin/ + && install kubectl /tmp/ export MINIKUBE_WANTUPDATENOTIFICATION=false -export MINIKUBE_HOME=$HOME -export CHANGE_MINIKUBE_NONE_USER=true -export KUBECONFIG=$HOME/.kube/config - -mkdir -p $HOME/.kube $HOME/.minikube -touch $KUBECONFIG - -sudo -E minikube start --driver=none +/tmp/minikube-linux-amd64 start --driver=docker ``` diff --git a/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md b/site/content/en/docs/tutorials/ebpf_tools_in_minikube.md similarity index 69% rename from site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md rename to site/content/en/docs/tutorials/ebpf_tools_in_minikube.md index 4931f35d9a..21dcb9b0b0 100644 --- a/site/content/en/docs/Tutorials/ebpf_tools_in_minikube.md +++ b/site/content/en/docs/tutorials/ebpf_tools_in_minikube.md @@ -22,25 +22,25 @@ This tutorial will cover how to set up your minikube cluster so that you can run First, start minikube: ``` -$ minikube start +$ minikube start --iso-url https://storage.googleapis.com/minikube-performance/minikube.iso ``` You will need to download and extract necessary kernel headers within minikube: ```shell -$ minikube ssh -- curl -Lo /tmp/kernel-headers-linux-4.19.94.tar.lz4 https://storage.googleapis.com/minikube-kernel-headers/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- curl -Lo /tmp/kernel-headers-linux-4.19.94.tar.lz4 https://storage.googleapis.com/minikube-kernel-headers/kernel-headers-linux-4.19.94.tar.lz4 -$ minikube ssh -- sudo mkdir -p /lib/modules/4.19.94/build +minikube ssh -- sudo mkdir -p /lib/modules/4.19.94/build -$ minikube ssh -- sudo tar -I lz4 -C /lib/modules/4.19.94/build -xvf /tmp/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- sudo tar -I lz4 -C /lib/modules/4.19.94/build -xvf /tmp/kernel-headers-linux-4.19.94.tar.lz4 -$ minikube ssh -- rm /tmp/kernel-headers-linux-4.19.94.tar.lz4 +minikube ssh -- rm /tmp/kernel-headers-linux-4.19.94.tar.lz4 ``` You can now run [BCC tools](https://github.com/iovisor/bcc) as a Docker container in minikube: ```shell -$ minikube ssh -- docker run -it --rm --privileged -v /lib/modules:/lib/modules:ro -v /usr/src:/usr/src:ro -v /etc/localtime:/etc/localtime:ro --workdir /usr/share/bcc/tools zlim/bcc ./execsnoop +$ minikube ssh -- docker run --rm --privileged -v /lib/modules:/lib/modules:ro -v /usr/src:/usr/src:ro -v /etc/localtime:/etc/localtime:ro --workdir /usr/share/bcc/tools zlim/bcc ./execsnoop Unable to find image 'zlim/bcc:latest' locally diff --git a/site/content/en/docs/tutorials/includes/hello-deployment.yaml b/site/content/en/docs/tutorials/includes/hello-deployment.yaml new file mode 100644 index 0000000000..f266444e9f --- /dev/null +++ b/site/content/en/docs/tutorials/includes/hello-deployment.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello +spec: + replicas: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100% + selector: + 
matchLabels: + app: hello + template: + metadata: + labels: + app: hello + spec: + affinity: + # ⬇⬇⬇ This ensures pods will land on separate hosts + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: [{ key: app, operator: In, values: [hello-from] }] + topologyKey: "kubernetes.io/hostname" + containers: + - name: hello-from + image: pbitty/hello-from:latest + ports: + - name: http + containerPort: 80 + terminationGracePeriodSeconds: 1 diff --git a/site/content/en/docs/tutorials/includes/hello-svc.yaml b/site/content/en/docs/tutorials/includes/hello-svc.yaml new file mode 100644 index 0000000000..6db3dd8647 --- /dev/null +++ b/site/content/en/docs/tutorials/includes/hello-svc.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: hello +spec: + type: NodePort + selector: + app: hello + ports: + - protocol: TCP + nodePort: 31000 + port: 80 + targetPort: http diff --git a/site/content/en/docs/tutorials/includes/kube-flannel.yaml b/site/content/en/docs/tutorials/includes/kube-flannel.yaml new file mode 100644 index 0000000000..bfed51be44 --- /dev/null +++ b/site/content/en/docs/tutorials/includes/kube-flannel.yaml @@ -0,0 +1,602 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + 
"capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-amd64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-amd64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm64 + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-arm64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-arm64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: 
kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-arm + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-arm + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-arm + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-ppc64le + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-ppc64le + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds-s390x + namespace: kube-system + labels: + tier: node 
+ app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.11.0-s390x + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0-s390x + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg diff --git a/site/content/en/docs/tutorials/multi_node.md b/site/content/en/docs/tutorials/multi_node.md new file mode 100644 index 0000000000..88ec4bfd5f --- /dev/null +++ b/site/content/en/docs/tutorials/multi_node.md @@ -0,0 +1,134 @@ +--- +title: "Using Multi-Node Clusters (Experimental)" +linkTitle: "Using multi-node clusters" +weight: 1 +date: 2019-11-24 +--- + +## Overview + +- This tutorial will show you how to start a multi-node clusters on minikube and deploy a service to it. + +## Prerequisites + +- minikube 1.9.0 or higher +- kubectl + +## Tutorial + +- Start a cluster with 2 nodes in the driver of your choice (the extra parameters are to make our chosen CNI, flannel, work while we're still experimental): +``` +minikube start --nodes 2 -p multinode-demo --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.244.0.0/16 +😄 [multinode-demo] minikube v1.9.2 on Darwin 10.14.6 +✨ Automatically selected the hyperkit driver +👍 Starting control plane node m01 in cluster multinode-demo +🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ... +🐳 Preparing Kubernetes v1.18.0 on Docker 19.03.8 ... +🌟 Enabling addons: default-storageclass, storage-provisioner + +👍 Starting node m02 in cluster multinode-demo +🔥 Creating hyperkit VM (CPUs=2, Memory=4000MB, Disk=20000MB) ... +🌐 Found network options: + ▪ NO_PROXY=192.168.64.213 +🐳 Preparing Kubernetes v1.18.0 on Docker 19.03.8 ... +🏄 Done! kubectl is now configured to use "multinode-demo" +``` + +- Get the list of your nodes: +``` +kubectl get nodes +NAME STATUS ROLES AGE VERSION +multinode-demo Ready master 9m58s v1.18.0 +multinode-demo-m02 Ready 9m5s v1.18.0 +``` + +- Install a CNI (e.g. flannel): +NOTE: This currently needs to be done manually after the apiserver is running, the multi-node feature is still experimental as of 1.9.2. 
+``` +kubectl apply -f kube-flannel.yaml +podsecuritypolicy.policy/psp.flannel.unprivileged created +clusterrole.rbac.authorization.k8s.io/flannel created +clusterrolebinding.rbac.authorization.k8s.io/flannel created +serviceaccount/flannel created +configmap/kube-flannel-cfg created +daemonset.apps/kube-flannel-ds-amd64 created +daemonset.apps/kube-flannel-ds-arm64 created +daemonset.apps/kube-flannel-ds-arm created +daemonset.apps/kube-flannel-ds-ppc64le created +daemonset.apps/kube-flannel-ds-s390x created +``` + +- Deploy our hello world deployment: +``` +kubectl apply -f hello-deployment.yaml +deployment.apps/hello created + +kubectl rollout status deployment/hello +deployment "hello" successfully rolled out +``` + + +- Deploy our hello world service, which just spits back the IP address the request was served from: +{{% readfile file="/docs/tutorials/includes/hello-svc.yaml" %}} +``` +kubectl apply -f hello-svc.yml +service/hello created +``` + + +- Check out the IP addresses of our pods, to note for future reference +``` +kubectl get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +hello-c7b8df44f-qbhxh 1/1 Running 0 31s 10.244.0.3 multinode-demo +hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinode-demo +``` + +- Look at our service, to know what URL to hit +``` +minikube service list +|-------------|------------|--------------|-----------------------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-------------|------------|--------------|-----------------------------| +| default | hello | 80 | http://192.168.64.226:31000 | +| default | kubernetes | No node port | | +| kube-system | kube-dns | No node port | | +|-------------|------------|--------------|-----------------------------| +``` + +- Let's hit the URL a few times and see what comes back +``` +curl http://192.168.64.226:31000 +Hello from hello-c7b8df44f-qbhxh (10.244.0.3) + +curl http://192.168.64.226:31000 +Hello from hello-c7b8df44f-qbhxh (10.244.0.3) + +curl http://192.168.64.226:31000 +Hello from hello-c7b8df44f-xv4v6 (10.244.0.2) + +curl http://192.168.64.226:31000 +Hello from hello-c7b8df44f-xv4v6 (10.244.0.2) +``` + +- Multiple nodes! + + +- Referenced YAML files +{{% tabs %}} +{{% tab kube-flannel.yaml %}} +``` +{{% readfile file="/docs/tutorials/includes/kube-flannel.yaml" %}} +``` +{{% /tab %}} +{{% tab hello-deployment.yaml %}} +``` +{{% readfile file="/docs/tutorials/includes/hello-deployment.yaml" %}} +``` +{{% /tab %}} +{{% tab hello-svc.yaml %}} +``` +{{% readfile file="/docs/tutorials/includes/hello-svc.yaml" %}} +``` +{{% /tab %}} +{{% /tabs %}} diff --git a/site/content/en/docs/Tutorials/nginx_tcp_udp_ingress.md b/site/content/en/docs/tutorials/nginx_tcp_udp_ingress.md similarity index 94% rename from site/content/en/docs/Tutorials/nginx_tcp_udp_ingress.md rename to site/content/en/docs/tutorials/nginx_tcp_udp_ingress.md index 596ddf1f3d..97eabbbabe 100644 --- a/site/content/en/docs/Tutorials/nginx_tcp_udp_ingress.md +++ b/site/content/en/docs/tutorials/nginx_tcp_udp_ingress.md @@ -18,14 +18,14 @@ is only configured to listen on ports 80 and 443. 
TCP and UDP services listening - Latest minikube binary and ISO - Telnet command line tool -- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) command line tool +- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) command line tool - A text editor ## Configuring TCP and UDP services with the nginx ingress controller -### Enable the ingress addon +### Enable the ingress addon -Enable the minikube ingress addon with the following command: +Enable the minikube ingress addon with the following command: ```shell minikube addons enable ingress @@ -228,6 +228,6 @@ for the latest info on these potential changes. ## Related articles -- [Routing traffic multiple services on ports 80 and 443 in minikube with the Kubernetes Ingress resource](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/) -- [Use port forwarding to access applications in a cluster](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) +- [Routing traffic to multiple services on ports 80 and 443 in minikube with the Kubernetes Ingress resource](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/) +- [Use port forwarding to access applications in a cluster](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) diff --git a/site/content/en/docs/Tutorials/nvidia_gpu.md b/site/content/en/docs/tutorials/nvidia_gpu.md similarity index 98% rename from site/content/en/docs/Tutorials/nvidia_gpu.md rename to site/content/en/docs/tutorials/nvidia_gpu.md index 68846ae15b..4e9f561490 100644 --- a/site/content/en/docs/Tutorials/nvidia_gpu.md +++ b/site/content/en/docs/tutorials/nvidia_gpu.md @@ -98,7 +98,7 @@ to expose GPUs with `--driver=kvm2`. Please don't mix these instructions. ## Why does minikube not support NVIDIA GPUs on macOS? -VM drivers supported by minikube for macOS doesn't support GPU passthrough: +The drivers supported by minikube for macOS don't support GPU passthrough: - [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108) - [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159) diff --git a/site/content/en/docs/Tutorials/openid_connect_auth.md b/site/content/en/docs/tutorials/openid_connect_auth.md similarity index 93% rename from site/content/en/docs/Tutorials/openid_connect_auth.md rename to site/content/en/docs/tutorials/openid_connect_auth.md index 46fbe28968..769155c296 100644 --- a/site/content/en/docs/Tutorials/openid_connect_auth.md +++ b/site/content/en/docs/tutorials/openid_connect_auth.md @@ -13,7 +13,7 @@ Read more about OpenID Connect Authentication for Kubernetes here: You can find out how to contribute to these docs in our Contributing Guide. +

You can find out how to contribute to these docs in our Contributing Guide. diff --git a/site/layouts/partials/hooks/head-end.html b/site/layouts/partials/hooks/head-end.html index 0ff1699c98..ed11ad10a1 100644 --- a/site/layouts/partials/hooks/head-end.html +++ b/site/layouts/partials/hooks/head-end.html @@ -1,4 +1,5 @@ + - + \ No newline at end of file diff --git a/site/layouts/partials/section-index.html b/site/layouts/partials/section-index.html new file mode 100644 index 0000000000..ffea5611c7 --- /dev/null +++ b/site/layouts/partials/section-index.html @@ -0,0 +1,39 @@ +{{ $related := .Site.RegularPages.Related . | first 3 }} +{{ with $related }} +

See Also

+ +{{ end }} + +
+ {{ $pages := (where .Site.Pages "Section" .Section).ByWeight }} + {{ $parent := .Page }} + {{ if $parent.Params.no_list }} + {{/* If no_list is true we don't show a list of subpages */}} + {{ else if $parent.Params.simple_list }} + {{/* If simple_list is true we show a bulleted list of subpages */}} +
    + {{ range $pages }} + {{ if eq .Parent $parent }} +
  • {{- .Title -}}
  • + {{ end }} + {{ end }} +
+ {{ else }} + {{/* Otherwise we show a nice formatted list of subpages with page descriptions */}} +
+ {{ range $pages }} + {{ if eq .Parent $parent }} +
+
+ {{- .Title -}} +
+

{{ .Description | markdownify }}

+
+ {{ end }} + {{ end }} + {{ end }} +
diff --git a/site/layouts/partials/sidebar-tree.html b/site/layouts/partials/sidebar-tree.html new file mode 100644 index 0000000000..029abae4c5 --- /dev/null +++ b/site/layouts/partials/sidebar-tree.html @@ -0,0 +1,115 @@ + + +{{/* minikube hack: temporarily forked from docsy/layouts/partials/sidebar-tree.html for hugo v0.69 compatibility */}} + +{{/* We cache this partial for bigger sites and set the active class client side. */}} +{{ $shouldDelayActive := ge (len .Site.Pages) 2000 }} +
+ {{ if not .Site.Params.ui.sidebar_search_disable }} + + {{ end }} + +
+ + + +{{ define "section-tree-nav-section" }} +{{ $s := .section }} +{{ $p := .page }} +{{ $shouldDelayActive := .delayActive }} +{{ $activeSection := eq $p.CurrentSection $s }} + +{{/* minikube hack: Override $showSection due to a Hugo upgrade bug */}} +{{ $showSection := false }} +{{ $expandSection := false }} +{{ $sid := $s.RelPermalink | anchorize }} +{{ $sectionParent := $s.Parent.Title | anchorize }} +{{ $csid := $p.CurrentSection.Title | anchorize }} + +{{ if $p.IsDescendant $s }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "minikube" }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "welcome" }} + + {{ $showSection = true }} +{{ else if eq $sectionParent "handbook" }} + + {{ $showSection = true }} +{{ else if eq $p.CurrentSection $s.Parent }} + + {{ $showSection = true }} +{{ else if $p.Parent.IsAncestor $s }} + + {{ if eq $s $p.CurrentSection }} + {{ $showSection = true }} + {{ end }} + +{{ end }} + + +{{/* end minikube hack */}} + +{{ $sid := $s.RelPermalink | anchorize }} +{{ if $showSection }} +
    +
  • + + {{ $s.LinkTitle }} +
  • +
      +
    • + {{ $pages := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true }} + {{ $pages := $pages | first 50 }} + {{ range $pages }} + {{ if .IsPage }} + {{ $mid := printf "m-%s" (.RelPermalink | anchorize) }} + + {{/* minikube hack: Override $activeSection due to a Hugo upgrade bug */}} + {{ $showPage := false }} + {{ $activePage := false }} + + + {{ if $activeSection }} + {{ $showPage = true }} + {{ $activePage := eq . $p }} + {{ end }} + + + {{ if eq $s.Title "Handbook" }} + {{ if lt (len $p.CurrentSection.Pages) 7 }} + {{ $showPage = true }} + {{ end }} + + {{ if eq $csid "welcome" }} + {{ $showPage = true }} + {{ end }} + {{ end }} + + {{ if $showPage }} + {{ .LinkTitle }} + {{ end }} + + {{/* end minikube hack */}} + {{ else }} + {{ template "section-tree-nav-section" (dict "page" $p "section" .) }} + {{ end }} + {{ end }} +
    • +
    +
+ {{ end }} +{{ end }} diff --git a/site/package-lock.json b/site/package-lock.json index 0cb1933f78..bbcb3785d0 100644 --- a/site/package-lock.json +++ b/site/package-lock.json @@ -259,6 +259,16 @@ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", "dev": true }, + "bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dev": true, + "optional": true, + "requires": { + "file-uri-to-path": "1.0.0" + } + }, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -356,9 +366,9 @@ } }, "chokidar": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.6.tgz", - "integrity": "sha512-V2jUo67OKkc6ySiRpJrjlpJKl9kDuG+Xb8VgsGzb+aEouhgS1D0weyPU4lEzdAcsCAvrih2J2BqyXqHWvVLw5g==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", + "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", "dev": true, "requires": { "anymatch": "^2.0.0", @@ -480,12 +490,14 @@ } }, "cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=", + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", "dev": true, "requires": { - "lru-cache": "^4.0.1", + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", "shebang-command": "^1.2.0", "which": "^1.2.9" } @@ -574,6 +586,15 @@ "integrity": "sha512-/QI4hMpAh48a1Sea6PALGv+kuVne9A2EWGd8HrWHMdYhIzGtbhVVHh6heL5fAzGaDnZuPyrlWJRl8WPm4RyiQQ==", "dev": true }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, "error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", @@ -596,13 +617,13 @@ "dev": true }, "execa": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c=", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", "dev": true, "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", "is-stream": "^1.1.0", "npm-run-path": "^2.0.0", "p-finally": "^1.0.0", @@ -745,6 +766,13 @@ "micromatch": "^3.1.10" } }, + "file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "optional": true + }, "fill-range": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", @@ -810,14 +838,15 @@ "dev": true }, "fsevents": { - "version": "1.2.9", - "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-1.2.9.tgz", - "integrity": "sha512-oeyj2H3EjjonWcFjD5NvZNE9Rqe4UW+nQBU2HNeKw0koVLEFIhtyETyAakeAM3de7Z/SW5kcA+fZUait9EApnw==", + "version": "1.2.12", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.12.tgz", + "integrity": "sha512-Ggd/Ktt7E7I8pxZRbGIs7vwqAPscSESMrCSkx2FtWeqmheJgCo2R74fTsZFCifr0VTPwqRpPv17+6b8Zp7th0Q==", "dev": true, "optional": true, "requires": { + "bindings": "^1.5.0", "nan": "^2.12.1", - "node-pre-gyp": "^0.12.0" + "node-pre-gyp": "*" }, "dependencies": { "abbrev": { @@ -865,7 +894,7 @@ } }, "chownr": { - "version": "1.1.1", + "version": "1.1.4", "bundled": true, "dev": true, "optional": true @@ -895,7 +924,7 @@ "optional": true }, "debug": { - "version": "4.1.1", + "version": "3.2.6", "bundled": true, "dev": true, "optional": true, @@ -922,12 +951,12 @@ "optional": true }, "fs-minipass": { - "version": "1.2.5", + "version": "1.2.7", "bundled": true, "dev": true, "optional": true, "requires": { - "minipass": "^2.2.1" + "minipass": "^2.6.0" } }, "fs.realpath": { @@ -953,7 +982,7 @@ } }, "glob": { - "version": "7.1.3", + "version": "7.1.6", "bundled": true, "dev": true, "optional": true, @@ -982,7 +1011,7 @@ } }, "ignore-walk": { - "version": "3.0.1", + "version": "3.0.3", "bundled": true, "dev": true, "optional": true, @@ -1001,7 +1030,7 @@ } }, "inherits": { - "version": "2.0.3", + "version": "2.0.4", "bundled": true, "dev": true, "optional": true @@ -1037,13 +1066,13 @@ } }, "minimist": { - "version": "0.0.8", + "version": "1.2.5", "bundled": true, "dev": true, "optional": true }, "minipass": { - "version": "2.3.5", + "version": "2.9.0", "bundled": true, "dev": true, "optional": true, @@ -1053,42 +1082,42 @@ } }, "minizlib": { - "version": "1.2.1", + "version": "1.3.3", "bundled": true, "dev": true, "optional": true, "requires": { - "minipass": "^2.2.1" + "minipass": "^2.9.0" } }, "mkdirp": { - "version": "0.5.1", + "version": "0.5.3", "bundled": true, "dev": true, "optional": true, "requires": { - "minimist": "0.0.8" + "minimist": "^1.2.5" } }, "ms": { - "version": "2.1.1", + "version": "2.1.2", "bundled": true, "dev": true, "optional": true }, "needle": { - "version": "2.3.0", + "version": "2.3.3", "bundled": true, "dev": true, "optional": true, "requires": { - "debug": "^4.1.0", + "debug": "^3.2.6", "iconv-lite": "^0.4.4", "sax": "^1.2.4" } }, "node-pre-gyp": { - "version": "0.12.0", + "version": "0.14.0", "bundled": true, "dev": true, "optional": true, @@ -1102,11 +1131,11 @@ "rc": "^1.2.7", "rimraf": "^2.6.1", "semver": "^5.3.0", - "tar": "^4" + "tar": "^4.4.2" } }, "nopt": { - "version": "4.0.1", + "version": "4.0.3", "bundled": true, "dev": true, "optional": true, @@ -1116,19 +1145,29 @@ } }, "npm-bundled": { - "version": "1.0.6", + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-normalize-package-bin": { + "version": "1.0.1", "bundled": true, "dev": true, "optional": true }, "npm-packlist": { - "version": "1.4.1", + "version": "1.4.8", "bundled": true, "dev": true, "optional": true, "requires": { "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1" + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" } }, "npmlog": { @@ -1193,7 +1232,7 @@ "optional": true }, "process-nextick-args": { - "version": "2.0.0", + "version": "2.0.1", "bundled": true, "dev": true, "optional": true @@ -1208,18 +1247,10 @@ "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" - }, - "dependencies": { 
- "minimist": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "optional": true - } } }, "readable-stream": { - "version": "2.3.6", + "version": "2.3.7", "bundled": true, "dev": true, "optional": true, @@ -1234,7 +1265,7 @@ } }, "rimraf": { - "version": "2.6.3", + "version": "2.7.1", "bundled": true, "dev": true, "optional": true, @@ -1261,7 +1292,7 @@ "optional": true }, "semver": { - "version": "5.7.0", + "version": "5.7.1", "bundled": true, "dev": true, "optional": true @@ -1314,18 +1345,18 @@ "optional": true }, "tar": { - "version": "4.4.8", + "version": "4.4.13", "bundled": true, "dev": true, "optional": true, "requires": { "chownr": "^1.1.1", "fs-minipass": "^1.2.5", - "minipass": "^2.3.4", - "minizlib": "^1.1.1", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", "mkdirp": "^0.5.0", "safe-buffer": "^5.1.2", - "yallist": "^3.0.2" + "yallist": "^3.0.3" } }, "util-deprecate": { @@ -1350,7 +1381,7 @@ "optional": true }, "yallist": { - "version": "3.0.3", + "version": "3.1.1", "bundled": true, "dev": true, "optional": true @@ -1370,10 +1401,13 @@ "dev": true }, "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", - "dev": true + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } }, "get-value": { "version": "2.0.6", @@ -1504,9 +1538,9 @@ "dev": true }, "invert-kv": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", - "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-2.0.0.tgz", + "integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==", "dev": true }, "is-accessor-descriptor": { @@ -1701,18 +1735,18 @@ } }, "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true }, "lcid": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", - "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-2.0.0.tgz", + "integrity": "sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==", "dev": true, "requires": { - "invert-kv": "^1.0.0" + "invert-kv": "^2.0.0" } }, "locate-path": { @@ -1740,14 +1774,13 @@ "chalk": "^2.0.1" } }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "map-age-cleaner": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", + "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", "dev": true, "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" + 
"p-defer": "^1.0.0" } }, "map-cache": { @@ -1766,12 +1799,14 @@ } }, "mem": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/mem/-/mem-1.1.0.tgz", - "integrity": "sha1-Xt1StIXKHZAP5kiVUFOZoN+kX3Y=", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz", + "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==", "dev": true, "requires": { - "mimic-fn": "^1.0.0" + "map-age-cleaner": "^0.1.1", + "mimic-fn": "^2.0.0", + "p-is-promise": "^2.0.0" } }, "merge2": { @@ -1802,9 +1837,9 @@ } }, "mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true }, "minimatch": { @@ -1817,9 +1852,9 @@ } }, "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", "dev": true }, "mixin-deep": { @@ -1875,6 +1910,12 @@ "to-regex": "^3.0.1" } }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true + }, "node-releases": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.3.tgz", @@ -1988,22 +2029,34 @@ "dev": true }, "os-locale": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-2.1.0.tgz", - "integrity": "sha512-3sslG3zJbEYcaC4YVAvDorjGxc7tv6KVATnLPZONiljsUncvihe9BQoVCEs0RZ1kmf4Hk9OBqlZfJZWI4GanKA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", + "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", "dev": true, "requires": { - "execa": "^0.7.0", - "lcid": "^1.0.0", - "mem": "^1.1.0" + "execa": "^1.0.0", + "lcid": "^2.0.0", + "mem": "^4.0.0" } }, + "p-defer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", + "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=", + "dev": true + }, "p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", "dev": true }, + "p-is-promise": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz", + "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==", + "dev": true + }, "p-limit": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", @@ -2189,11 +2242,15 @@ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", "dev": true }, - "pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": 
"sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", - "dev": true + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } }, "read-cache": { "version": "1.0.0", @@ -2213,9 +2270,9 @@ } }, "readable-stream": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, "requires": { "core-util-is": "~1.0.0", @@ -2362,9 +2419,9 @@ "dev": true }, "signal-exit": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", "dev": true }, "slash": { @@ -2685,9 +2742,9 @@ } }, "upath": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/upath/-/upath-1.1.2.tgz", - "integrity": "sha512-kXpym8nmDmlCBr7nKdIx8P2jNBa+pBpIUFRnKJ4dr8htyYGJFokkr2ZvERRtUN+9SY+JqXouNgUPtv6JQva/2Q==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", + "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", "dev": true }, "urix": { @@ -2782,23 +2839,17 @@ "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=", "dev": true }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", - "dev": true - }, "yargs": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-11.1.0.tgz", - "integrity": "sha512-NwW69J42EsCSanF8kyn5upxvjp5ds+t3+udGBeTbFnERA+lF541DDpMawzo4z6W/QrzNM18D+BPMiOBibnFV5A==", + "version": "11.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-11.1.1.tgz", + "integrity": "sha512-PRU7gJrJaXv3q3yQZ/+/X6KBswZiaQ+zOmdprZcouPYtQgvNU35i+68M4b1ZHLZtYFT5QObFLV+ZkmJYcwKdiw==", "dev": true, "requires": { "cliui": "^4.0.0", "decamelize": "^1.1.1", "find-up": "^2.1.0", "get-caller-file": "^1.0.1", - "os-locale": "^2.0.0", + "os-locale": "^3.1.0", "require-directory": "^2.1.1", "require-main-filename": "^1.0.1", "set-blocking": "^2.0.0", diff --git a/site/static/css/tabs.css b/site/static/css/tabs.css index d3c83fd294..60410a2fec 100644 --- a/site/static/css/tabs.css +++ b/site/static/css/tabs.css @@ -8,48 +8,62 @@ ul.nav.nav-tabs { div.code-tabs li.nav-tab { margin-top: 1.5rem; - padding: 10px 20px; + padding: 7px 18px; float: left; text-align: center; text-decoration: none; - color: #666; - font-size: 14px; + color: #444; + font-size: 18px; + font-family: 'Open Sans', sans-serif; - border-top: 1px solid #C7EAEC; - border-left: 1px solid #C7EAEC; - border-right: 1px solid #C7EAEC; + border-top: 1px solid #ccc; + border-left: 1px solid #ccc; + border-right: 1px solid #ccc; margin-right: 0.5em; border-top-left-radius: 4px; border-top-right-radius: 4px; - 
background-color: #c7eaec; + /* inactive tab (title) */ + background-color: #C5E8EA; /* $mk-light - minus two shades*/ } +/* active tab (title) */ div.code-tabs li.active { color: #f2771a; - border-top: 1px solid #C7EAEC; - border-left: 1px solid #C7EAEC; - border-right: 1px solid #C7EAEC; - background: rgb(244, 255, 255); + border-top: 1px solid #b0b6b6; + border-left: 1px solid #b0b6b6; + border-right: 1px solid #b0b6b6; + background: #f3f9fa; /* $mk-verylight */ font-weight: bold; } +/* active tab background */ +div.tab-pane.active { + display: block; + padding: 2em; + background: #f3f9fa; /* $mk-verylight */ + border: 1px solid #b0b6b6; +} + + +/* active tab code sample */ +div.code-tabs div.highlight { + border: 1px solid #ccc; + background-color: #fff; +} + + div.code-tabs a.nav-tab { all: unset; cursor: pointer; } + div.tab-pane { display: none; margin-bottom: 3rem; } -div.tab-pane.active { - display: block; - padding: 2em; - background: rgb(244, 255, 255); - border: 1px solid #C7EAEC; -} div.code-tabs code { word-break: keep-all; diff --git a/site/static/images/screenshot.png b/site/static/images/screenshot.png new file mode 100644 index 0000000000..4eefa2e796 Binary files /dev/null and b/site/static/images/screenshot.png differ diff --git a/site/themes/docsy b/site/themes/docsy index 493bb1a0af..dd303fd19c 160000 --- a/site/themes/docsy +++ b/site/themes/docsy @@ -1 +1 @@ -Subproject commit 493bb1a0af92d1242f8396aeb1661dcd3a010db7 +Subproject commit dd303fd19cc13ffc01bcbe86ff54f21e423d04de diff --git a/test.sh b/test.sh index 463cca159f..79a3501648 100755 --- a/test.sh +++ b/test.sh @@ -60,10 +60,12 @@ then echo "mode: count" >"${COVERAGE_PATH}" pkgs=$(go list -f '{{ if .TestGoFiles }}{{.ImportPath}}{{end}}' ./cmd/... ./pkg/... | xargs) go test \ + -ldflags="$MINIKUBE_LDFLAGS" \ -tags "container_image_ostree_stub containers_image_openpgp" \ -covermode=count \ -coverprofile="${cov_tmp}" \ - ${pkgs} && echo ok || ((exitcode += 32)) + ${pkgs} \ + && echo ok || ((exitcode += 32)) tail -n +2 "${cov_tmp}" >>"${COVERAGE_PATH}" fi diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index e4ca5ebb26..7d198b7634 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -21,7 +21,6 @@ package integration import ( "context" "crypto/md5" - "encoding/json" "fmt" "io/ioutil" "os" @@ -30,189 +29,153 @@ import ( "runtime" "strings" "testing" - "time" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/daemon" - "k8s.io/minikube/pkg/drivers/kic" "k8s.io/minikube/pkg/minikube/bootstrapper/images" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/localpath" ) func TestDownloadOnly(t *testing.T) { - profile := UniqueProfileName("download") + for _, r := range []string{"crio", "docker", "containerd"} { + t.Run(r, func(t *testing.T) { + // Stores the startup run result for later error messages + var rrr *RunResult + var err error + + profile := UniqueProfileName(r) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer Cleanup(t, profile, cancel) + + versions := []string{ + constants.OldestKubernetesVersion, + constants.DefaultKubernetesVersion, + constants.NewestKubernetesVersion, + } + + for _, v := range versions { + t.Run(v, func(t *testing.T) { + // Explicitly does not pass StartArgs() to test driver default + // --force to avoid 
uid check + args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...) + + // Preserve the initial run-result for debugging + if rrr == nil { + rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) + } else { + _, err = Run(t, exec.CommandContext(ctx, Target(), args...)) + } + + if err != nil { + t.Errorf("failed to download only. args: %q %v", args, err) + } + + // skip for none, as none driver does not have preload feature. + if !NoneDriver() { + if download.PreloadExists(v, r) { + // Just make sure the tarball path exists + if _, err := os.Stat(download.TarballPath(v, r)); err != nil { + t.Errorf("failed to verify preloaded tarball file exists: %v", err) + } + return + } + } + imgs, err := images.Kubeadm("", v) + if err != nil { + t.Errorf("failed to get kubeadm images for %v: %+v", v, err) + } + + // skip verify for cache images if --driver=none + if !NoneDriver() { + for _, img := range imgs { + img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 + fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) + _, err := os.Stat(fp) + if err != nil { + t.Errorf("expected image file exist at %q but got error: %v", fp, err) + } + } + } + + // checking binaries downloaded (kubelet,kubeadm) + for _, bin := range constants.KubernetesReleaseBinaries { + fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) + _, err := os.Stat(fp) + if err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + } + + // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded + // as well for the `minikube kubectl` command + if runtime.GOOS == "linux" { + return + } + binary := "kubectl" + if runtime.GOOS == "windows" { + binary = "kubectl.exe" + } + fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) + if _, err := os.Stat(fp); err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + }) + } + + // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! + t.Run("DeleteAll", func(t *testing.T) { + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) + if err != nil { + t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err) + } + }) + // Delete should always succeed, even if previously partially or fully deleted. + t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) + if err != nil { + t.Errorf("failed to delete. 
args: %q: %v", rr.Command(), err) + } + }) + }) + } +} + +func TestDownloadOnlyKic(t *testing.T) { + if !KicDriver() { + t.Skip("skipping, only for docker or podman driver") + } + profile := UniqueProfileName("download-docker") ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) defer Cleanup(t, profile, cancel) - // Stores the startup run result for later error messages - var rrr *RunResult - var err error + cRuntime := "docker" - t.Run("group", func(t *testing.T) { - versions := []string{ - constants.OldestKubernetesVersion, - constants.DefaultKubernetesVersion, - constants.NewestKubernetesVersion, - } - for _, v := range versions { - t.Run(v, func(t *testing.T) { - // Explicitly does not pass StartArgs() to test driver default - // --force to avoid uid check - args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v)}, StartArgs()...) + args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"} + args = append(args, StartArgs()...) - // Preserve the initial run-result for debugging - if rrr == nil { - rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) - } else { - _, err = Run(t, exec.CommandContext(ctx, Target(), args...)) - } - - if err != nil { - t.Errorf("%s failed: %v", args, err) - } - - imgs, err := images.Kubeadm("", v) - if err != nil { - t.Errorf("kubeadm images: %v %+v", v, err) - } - - // skip verify for cache images if --driver=none - if !NoneDriver() { - for _, img := range imgs { - img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 - fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected image file exist at %q but got error: %v", fp, err) - } - } - } - - // checking binaries downloaded (kubelet,kubeadm) - for _, bin := range constants.KubernetesReleaseBinaries { - fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) - } - } - - // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded - // as well for the `minikube kubectl` command - if runtime.GOOS == "linux" { - return - } - binary := "kubectl" - if runtime.GOOS == "windows" { - binary = "kubectl.exe" - } - fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) - if _, err := os.Stat(fp); err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) - } - }) - } - - // Check that the profile we've created has the expected driver - t.Run("ExpectedDefaultDriver", func(t *testing.T) { - if ExpectedDefaultDriver() == "" { - t.Skipf("--expected-default-driver is unset, skipping test") - return - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - var ps map[string][]config.Profile - err = json.Unmarshal(rr.Stdout.Bytes(), &ps) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - - got := "" - for _, p := range ps["valid"] { - if p.Name == profile { - got = p.Config.Driver - } - } - - if got != ExpectedDefaultDriver() { - t.Errorf("got driver %q, expected %q\nstart output: %s", got, ExpectedDefaultDriver(), rrr.Output()) - } - }) - - // This is a weird place to test profile deletion, but this test is serial, and we 
have a profile to delete! - t.Run("DeleteAll", func(t *testing.T) { - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - }) - // Delete should always succeed, even if previously partially or fully deleted. - t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") - } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - }) - }) - -} -func TestDownloadOnlyDocker(t *testing.T) { - if !runningDockerDriver(StartArgs()) { - t.Skip("this test only runs with the docker driver") - } - - profile := UniqueProfileName("download-docker") - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) - defer Cleanup(t, profile, cancel) - - args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", "--driver=docker"} - rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) - if err != nil { - t.Errorf("%s failed: %v:\n%s", args, err, rr.Output()) + if _, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err != nil { + t.Errorf("start with download only failed %q : %v", args, err) } // Make sure the downloaded image tarball exists - tarball := download.TarballPath(constants.DefaultKubernetesVersion) + tarball := download.TarballPath(constants.DefaultKubernetesVersion, cRuntime) contents, err := ioutil.ReadFile(tarball) if err != nil { - t.Errorf("reading tarball: %v", err) + t.Errorf("failed to read tarball file %q: %v", tarball, err) } // Make sure it has the correct checksum checksum := md5.Sum(contents) - remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion)) + remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime)) if err != nil { - t.Errorf("reading checksum file: %v", err) + t.Errorf("failed to read checksum file %q : %v", download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime), err) } if string(remoteChecksum) != string(checksum[:]) { - t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:])) - } - - // Make sure this image exists in the docker daemon - ref, err := name.ParseReference(kic.BaseImage) - if err != nil { - t.Errorf("parsing reference failed: %v", err) - } - if _, err := daemon.Image(ref); err != nil { - t.Errorf("expected image does not exist in local daemon: %v", err) + t.Errorf("failed to verify checksum. 
checksum of %q does not match remote checksum (%q != %q)", tarball, string(remoteChecksum), string(checksum[:])) } } - -func runningDockerDriver(startArgs []string) bool { - for _, s := range startArgs { - if s == "--driver=docker" { - return true - } - } - return false -} diff --git a/test/integration/aab_offline_test.go b/test/integration/aab_offline_test.go index fb1cdb710f..c635ef09ef 100644 --- a/test/integration/aab_offline_test.go +++ b/test/integration/aab_offline_test.go @@ -53,7 +53,7 @@ func TestOffline(t *testing.T) { rr, err := Run(t, c) if err != nil { // Fatal so that we may collect logs before stop/delete steps - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } }) } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 2120ca4060..e6c04726ef 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -40,10 +40,10 @@ func TestAddons(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Parallelized tests @@ -55,6 +55,7 @@ func TestAddons(t *testing.T) { {"Registry", validateRegistryAddon}, {"Ingress", validateIngressAddon}, {"MetricsServer", validateMetricsServerAddon}, + {"HelmTiller", validateHelmTillerAddon}, } for _, tc := range tests { tc := tc @@ -68,15 +69,15 @@ func TestAddons(t *testing.T) { // Assert that disable/enable works offline rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stop minikube. 
args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err) } } @@ -87,30 +88,30 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client: %v", client) } if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", Minutes(6)); err != nil { - t.Errorf("waiting for ingress-controller deployment to stabilize: %v", err) + t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "app.kubernetes.io/name=nginx-ingress-controller", Minutes(12)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waititing for nginx-ingress-controller : %v", err) } rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for ngnix pod: %v", err) } if err := kapi.WaitForService(client, "default", "nginx", true, time.Millisecond*500, Minutes(10)); err != nil { - t.Errorf("Error waiting for nginx service to be up") + t.Errorf("failed waiting for nginx service to be up: %v", err) } want := "Welcome to nginx!" @@ -120,65 +121,69 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } - if err := retry.Expo(checkIngress, 500*time.Millisecond, Minutes(1)); err != nil { - t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err) + if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil { + t.Errorf("failed to get response from ngninx ingress on 127.0.0.1:80: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable ingress addon. 
args %q : %v", rr.Command(), err) } } func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s : %v", profile, err) } start := time.Now() if err := kapi.WaitForRCToStabilize(client, "kube-system", "registry", Minutes(6)); err != nil { - t.Errorf("waiting for registry replicacontroller to stabilize: %v", err) + t.Errorf("failed waiting for registry replicacontroller to stabilize: %v", err) } t.Logf("registry stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "actual-registry=true", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod actual-registry: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "registry-proxy=true", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod registry-proxy: %v", err) } // Test from inside the cluster (no curl available on busybox) rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "po", "-l", "run=registry-test", "--now")) if err != nil { - t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Args, err) + t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Command(), err) } want := "HTTP/1.1 200" if !strings.Contains(rr.Stdout.String(), want) { - t.Errorf("curl = %q, want *%s*", rr.Stdout.String(), want) + t.Errorf("expected curl response be %q, but got *%s*", want, rr.Stdout.String()) + } + + if NeedsPortForward() { + t.Skipf("Unable to complete rest of the test due to connectivity assumptions") } // Test from outside the cluster rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed run minikube ip. args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("%s: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Command()) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) @@ -198,30 +203,30 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { return nil } - if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Minutes(2)); err != nil { - t.Errorf(err.Error()) + if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Seconds(150)); err != nil { + t.Errorf("failed to check external access to %s: %v", u.String(), err.Error()) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable registry addon. 
args %q: %v", rr.Command(), err) } } func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) } start := time.Now() if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "metrics-server", Minutes(6)); err != nil { - t.Errorf("waiting for metrics-server deployment to stabilize: %v", err) + t.Errorf("failed waiting for metrics-server deployment to stabilize: %v", err) } t.Logf("metrics-server stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "k8s-app=metrics-server", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for k8s-app=metrics-server pod: %v", err) } want := "CPU(cores)" @@ -231,21 +236,71 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } // metrics-server takes some time to be able to collect metrics - if err := retry.Expo(checkMetricsServer, Seconds(13), Minutes(6)); err != nil { - t.Errorf(err.Error()) + if err := retry.Expo(checkMetricsServer, time.Second*3, Minutes(6)); err != nil { + t.Errorf("failed checking metric server: %v", err.Error()) } rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Command(), err) + } +} + +func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) { + client, err := kapi.Client(profile) + if err != nil { + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) + } + + start := time.Now() + if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil { + t.Errorf("failed waiting for tiller-deploy deployment to stabilize: %v", err) + } + t.Logf("tiller-deploy stabilized in %s", time.Since(start)) + + if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil { + t.Fatalf("failed waiting for helm pod: %v", err) + } + + if NoneDriver() { + _, err := exec.LookPath("socat") + if err != nil { + t.Skipf("socat is required by kubectl to complete this test") + } + } + + want := "Server: &version.Version" + // Test from inside the cluster (`helm version` use pod.list permission. 
we use tiller serviceaccount in kube-system to list pod) + checkHelmTiller := func() error { + + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version")) + if err != nil { + return err + } + if rr.Stderr.String() != "" { + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) + } + if !strings.Contains(rr.Stdout.String(), want) { + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) + } + return nil + } + + if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil { + t.Errorf("failed checking helm tiller: %v", err.Error()) + } + + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) + if err != nil { + t.Errorf("failed disabling helm-tiller addon. arg %q.s %v", rr.Command(), err) } } diff --git a/test/integration/cert_options_test.go b/test/integration/cert_options_test.go new file mode 100644 index 0000000000..efa1cb4262 --- /dev/null +++ b/test/integration/cert_options_test.go @@ -0,0 +1,56 @@ +// +build integration + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "os/exec" + "testing" +) + +func TestCertOptions(t *testing.T) { + if NoneDriver() { + t.Skip("skipping: none driver does not support ssh or bundle docker") + } + MaybeParallel(t) + + profile := UniqueProfileName("cert-options") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer CleanupWithLogs(t, profile, cancel) + + // Use the most verbose logging for the simplest test. If it fails, something is very wrong. + args := append([]string{"start", "-p", profile, "--memory=1900", "--apiserver-ips=127.0.0.1,192.168.15.15", "--apiserver-names=localhost,www.google.com", "--apiserver-port=8555"}, StartArgs()...) + + // We can safely override --apiserver-name with + if NeedsPortForward() { + args = append(args, "--apiserver-name=localhost") + } + + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) + } + + // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version")) + if err != nil { + t.Errorf("failed to get kubectl version. 
args %q : %v", rr.Command(), err) + } + +} diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index 05075fe579..dd0a27de06 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -39,27 +39,27 @@ func TestDockerFlags(t *testing.T) { args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err) } for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} { if !strings.Contains(rr.Stdout.String(), envVar) { - t.Errorf("env var %s missing: %s.", envVar, rr.Stdout) + t.Errorf("expected env key/value %q to be passed to minikube's docker and be included in: *%q*.", envVar, rr.Stdout) } } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err) } for _, opt := range []string{"--debug", "--icc=true"} { if !strings.Contains(rr.Stdout.String(), opt) { - t.Fatalf("%s = %q, want *%s*", rr.Command(), rr.Stdout, opt) + t.Fatalf("expected %q output to have include *%s* . output: %q", rr.Command(), opt, rr.Stdout) } } } diff --git a/test/integration/error_spam_test.go b/test/integration/error_spam_test.go new file mode 100644 index 0000000000..98a4fa1508 --- /dev/null +++ b/test/integration/error_spam_test.go @@ -0,0 +1,69 @@ +// +build integration + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "os/exec" + "strings" + "testing" +) + +// TestErrorSpam asserts that there are no errors displayed +func TestErrorSpam(t *testing.T) { + if NoneDriver() { + t.Skip("none driver always shows a warning") + } + MaybeParallel(t) + + profile := UniqueProfileName("nospam") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(25)) + defer CleanupWithLogs(t, profile, cancel) + + // This should likely use multi-node once it's ready + args := append([]string{"start", "-p", profile, "-n=1", "--memory=2250", "--wait=false"}, StartArgs()...) 
+ + rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if err != nil { + t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) + } + + for _, line := range strings.Split(rr.Stderr.String(), "\n") { + if strings.HasPrefix(line, "E") { + t.Errorf("unexpected error log in stderr: %q", line) + continue + } + + if strings.Contains(line, "kubectl") || strings.Contains(line, "slow") || strings.Contains(line, "long time") { + continue + } + if len(strings.TrimSpace(line)) > 0 { + t.Errorf("unexpected stderr line: %q", line) + } + } + + for _, line := range strings.Split(rr.Stdout.String(), "\n") { + keywords := []string{"error", "fail", "warning", "conflict"} + for _, keyword := range keywords { + if strings.Contains(line, keyword) { + t.Errorf("unexpected %q in stdout line: %q", keyword, line) + } + } + } +} diff --git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index eedc9e7983..915262a833 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -66,10 +66,10 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { defer func() { if t.Failed() { - t.Logf("%s failed, getting debug info...", t.Name()) + t.Logf("%q failed, getting debug info...", t.Name()) rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("debugging command %q failed : %v", rr.Command(), err) } else { t.Logf("(debug) %s:\n%s", rr.Command(), rr.Stdout) } @@ -78,7 +78,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Cleanup in advance of future tests rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo umount -f /mount-9p")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("%q: %v", rr.Command(), err) } ss.Stop(t) cancel() @@ -106,7 +106,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { } start := time.Now() - if err := retry.Expo(checkMount, time.Second, 15*time.Second); err != nil { + if err := retry.Expo(checkMount, time.Millisecond*500, Seconds(15)); err != nil { // For local testing, allow macOS users to click prompt. If they don't, skip the test. if runtime.GOOS == "darwin" { t.Skip("skipping: mount did not appear, likely because macOS requires prompt to allow non-codesigned binaries to listen on non-localhost port") @@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Assert that we can access the mount without an error. Display for debugging. rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Command(), err) } t.Logf("guest mount directory contents\n%s", rr.Stdout) @@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { tp := filepath.Join("/mount-9p", testMarker) rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { @@ -136,28 +136,28 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Start the "busybox-mount" pod. 
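
Before the busybox-mount pod step below, note that the tightened polling intervals in these hunks (500ms initial interval, a fixed time budget) all go through retry.Expo from k8s.io/minikube/pkg/util/retry, which retries a callback with exponential backoff until the budget runs out. A rough standalone approximation of that polling pattern, with made-up constants and a toy condition (the real helper's behavior may differ in detail), looks like this:

package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry is a rough stand-in for a retry.Expo-style helper: it calls fn,
// and on error sleeps with a doubling interval until maxTime has elapsed.
func expoRetry(fn func() error, initial, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	wait := initial
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(wait).After(deadline) {
			return fmt.Errorf("timed out after %s: %w", maxTime, err)
		}
		time.Sleep(wait)
		wait *= 2
	}
}

func main() {
	attempts := 0
	// Toy condition: the "mount" becomes visible on the third check.
	checkMount := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("mount not visible yet")
		}
		return nil
	}
	if err := expoRetry(checkMount, 500*time.Millisecond, 15*time.Second); err != nil {
		fmt.Println("mount never appeared:", err)
		return
	}
	fmt.Printf("mount appeared after %d attempts\n", attempts)
}
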
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox-mount pod: %v", err) } // Read the file written by pod startup p := filepath.Join(tempDir, createdByPod) got, err := ioutil.ReadFile(p) if err != nil { - t.Errorf("readfile %s: %v", p, err) + t.Errorf("failed to read file created by pod %q: %v", p, err) } wantFromPod := []byte("test\n") if !bytes.Equal(got, wantFromPod) { - t.Errorf("%s = %q, want %q", p, got, wantFromPod) + t.Errorf("the content of the file %q is %q, but want it to be: *%q*", p, got, wantFromPod) } // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest) @@ -169,27 +169,27 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/fromhost; rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Command(), err) } if runtime.GOOS == "windows" { if strings.Contains(rr.Stdout.String(), "Access: 1970-01-01") { - t.Errorf("invalid access time: %v", rr.Stdout) + t.Errorf("expected to get valid access time but got: %q", rr.Stdout) } } if strings.Contains(rr.Stdout.String(), "Modify: 1970-01-01") { - t.Errorf("invalid modify time: %v", rr.Stdout) + t.Errorf("expected to get valid modify time but got: %q", rr.Stdout) } } p = filepath.Join(tempDir, createdByTestRemovedByPod) if _, err := os.Stat(p); err == nil { - t.Errorf("expected file %s to be removed", p) + t.Errorf("expected file %q to be removed but exists !", p) } p = filepath.Join(tempDir, createdByPodRemovedByTest) if err := os.Remove(p); err != nil { - t.Errorf("unexpected error removing file %s: %v", p, err) + t.Errorf("failed to remove file %q: %v", p, err) } } diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index 4f97c830ef..9cca92cc47 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -38,7 +38,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st defer cancel() if _, err := PodWait(ctx, t, profile, "kube-system", "integration-test=storage-provisioner", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for storage-provisioner: %v", err) } checkStorageClass := func() error { @@ -57,14 +57,14 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st } // Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound - if err := retry.Expo(checkStorageClass, time.Second, 90*time.Second); err != nil { - t.Errorf("no default storage class 
after retry: %v", err) + if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil { + t.Errorf("failed to check for storage class: %v", err) } // Now create a testpvc rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err) } checkStoragePhase := func() error { @@ -84,6 +84,6 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st } if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil { - t.Fatalf("PV Creation failed with error: %v", err) + t.Fatalf("failed to check storage phase: %v", err) } } diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index a21b9350c3..8f43a9a5fd 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -50,7 +50,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("client: %v", err) + t.Fatalf("failed to get kubernetes client for %q: %v", profile, err) } // Pre-Cleanup @@ -62,14 +62,14 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to start a tunnel: args %q: %v", args, err) } defer ss.Stop(t) // Start the "nginx" pod. rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil { t.Fatalf("wait: %v", err) @@ -97,9 +97,9 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } - t.Logf("kubectl get svc nginx-svc:\n%s", rr.Stdout) + t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout) } got := []byte{} @@ -119,12 +119,12 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { } return nil } - if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 6); err != nil { - t.Errorf("failed to contact nginx at %s: %v", nginxIP, err) + if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil { + t.Errorf("failed to hit nginx at %q: %v", nginxIP, err) } want := "Welcome to nginx!" 
if !strings.Contains(string(got), want) { - t.Errorf("body = %q, want *%s*", got, want) + t.Errorf("expected body to contain %q, but got *%q*", want, got) } } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 8445aa364a..5f1ef0124b 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -38,7 +38,9 @@ import ( "github.com/google/go-cmp/cmp" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" + "k8s.io/minikube/pkg/util/retry" "github.com/elazarl/goproxy" "github.com/hashicorp/go-retryablehttp" @@ -46,12 +48,14 @@ import ( "github.com/phayes/freeport" "github.com/pkg/errors" "golang.org/x/build/kubernetes/api" - "k8s.io/minikube/pkg/util/retry" ) // validateFunc are for subtests that share a single setup type validateFunc func(context.Context, *testing.T, string) +// used in validateStartWithProxy and validateSoftStart +var apiPortTest = 8441 + // TestFunctional are functionality tests which can safely share a profile in parallel func TestFunctional(t *testing.T) { @@ -63,12 +67,17 @@ func TestFunctional(t *testing.T) { } p := localSyncTestPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove %q: %v", p, err) } p = localTestCertPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove %q: %v", p, err) } + p = localEmptyCertPath() + if err := os.Remove(p); err != nil { + t.Logf("unable to remove %q: %v", p, err) + } + CleanupWithLogs(t, profile, cancel) }() @@ -80,6 +89,7 @@ func TestFunctional(t *testing.T) { }{ {"CopySyncFile", setupFileSync}, // Set file for the file sync test case {"StartWithProxy", validateStartWithProxy}, // Set everything else up for success + {"SoftStart", validateSoftStart}, // do a soft start. ensure config didnt change. 
{"KubeContext", validateKubeContext}, // Racy: must come immediately after "minikube start" {"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up {"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy @@ -137,12 +147,12 @@ func TestFunctional(t *testing.T) { func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) } expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"} for _, el := range expectedLabels { if !strings.Contains(rr.Output(), el) { - t.Errorf("expected to have label %q in node labels: %q", expectedLabels, rr.Output()) + t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output()) } } } @@ -155,10 +165,10 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile) rr, err := Run(t, c) if err != nil { - t.Fatalf("Failed to do minikube status after eval-ing docker-env %s", err) + t.Fatalf("failed to do minikube status after eval-ing docker-env %s", err) } if !strings.Contains(rr.Output(), "Running") { - t.Fatalf("Expected status output to include 'Running' after eval docker-env but got \n%s", rr.Output()) + t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%s*", rr.Output()) } mctx, cancel = context.WithTimeout(ctx, Seconds(13)) @@ -167,12 +177,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images") rr, err = Run(t, c) if err != nil { - t.Fatalf("Failed to test eval docker-evn %s", err) + t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err) } expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner" if !strings.Contains(rr.Output(), expectedImgInside) { - t.Fatalf("Expected 'docker ps' to have %q from docker-daemon inside minikube. the docker ps output is:\n%q\n", expectedImgInside, rr.Output()) + t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%s*", expectedImgInside, rr.Output()) } } @@ -180,11 +190,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { srv, err := startHTTPProxy(t) if err != nil { - t.Fatalf("Failed to set up the test proxy: %s", err) + t.Fatalf("failed to set up the test proxy: %s", err) } // Use more memory so that we may reliably fit MySQL and nginx - startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...) + // changing api server so later in soft start we verify it didn't change + startArgs := append([]string{"start", "-p", profile, fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=true"}, StartArgs()...) c := exec.CommandContext(ctx, Target(), startArgs...) 
env := os.Environ() env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr)) @@ -192,7 +203,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { c.Env = env rr, err := Run(t, c) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed minikube start. args %q: %v", rr.Command(), err) } want := "Found network options:" @@ -206,14 +217,45 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { } } +// validateSoftStart validates that after minikube already started, a "minikube start" should not change the configs. +func validateSoftStart(ctx context.Context, t *testing.T, profile string) { + start := time.Now() + // the test before this had been start with --apiserver-port=8441 + beforeCfg, err := config.LoadProfile(profile) + if err != nil { + t.Errorf("error reading cluster config before soft start: %v", err) + } + if beforeCfg.Config.KubernetesConfig.NodePort != apiPortTest { + t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort) + } + + softStartArgs := []string{"start", "-p", profile} + c := exec.CommandContext(ctx, Target(), softStartArgs...) + rr, err := Run(t, c) + if err != nil { + t.Errorf("failed to soft start minikube. args %q: %v", rr.Command(), err) + } + t.Logf("soft start took %s for %q cluster.", time.Since(start), profile) + + afterCfg, err := config.LoadProfile(profile) + if err != nil { + t.Errorf("error reading cluster config after soft start: %v", err) + } + + if afterCfg.Config.KubernetesConfig.NodePort != apiPortTest { + t.Errorf("expected node port in the config not change after soft start. exepceted node port to be %d but got %d.", apiPortTest, afterCfg.Config.KubernetesConfig.NodePort) + } + +} + // validateKubeContext asserts that kubectl is properly configured (race-condition prone!) func validateKubeContext(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get current-context. args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), profile) { - t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile) + t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String()) } } @@ -221,22 +263,23 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command()) } if !strings.Contains(rr.Stdout.String(), "kube-system") { - t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout) + t.Errorf("expected stdout to include *kube-system* but got *%q*. 
args: %q", rr.Stdout, rr.Command()) } } // validateMinikubeKubectl validates that the `minikube kubectl` command returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { - kubectlArgs := []string{"kubectl", "--", "get", "pods"} + // Must set the profile so that it knows what version of Kubernetes to use + kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get pods. args %q: %v", rr.Command(), err) } } @@ -244,12 +287,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get components. args %q: %v", rr.Command(), err) } cs := api.ComponentStatusList{} d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())) if err := d.Decode(&cs); err != nil { - t.Fatalf("decode: %v", err) + t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err) } for _, i := range cs.Items { @@ -269,40 +312,41 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Command(), err) } - match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String()) + re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)` + match, _ := regexp.MatchString(re, rr.Stdout.String()) if !match { - t.Errorf("%s failed: %v. Output for custom format did not match", rr.Args, err) + t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output: %s", re, rr.Command(), rr.Output()) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err) } if _, ok := jsonObject["Host"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Host") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Host") } if _, ok := jsonObject["Kubelet"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubelet") } if _, ok := jsonObject["APIServer"]; !ok { - t.Errorf("%s failed: %v. 
Missing key %s in json object", rr.Args, err, "APIServer") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "APIServer") } if _, ok := jsonObject["Kubeconfig"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubeconfig") } } @@ -311,7 +355,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to run minikube dashboard. args %q : %v", args, err) } defer func() { ss.Stop(t) @@ -333,12 +377,13 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { resp, err := retryablehttp.Get(u.String()) if err != nil { - t.Errorf("failed get: %v", err) + t.Fatalf("failed to http get %q: %v\nresponse: %+v", u.String(), err, resp) } + if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) if err != nil { - t.Errorf("Unable to read http response body: %v", err) + t.Errorf("failed to read http response body from dashboard %q: %v", u.String(), err) } t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body) } @@ -348,12 +393,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) if err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox pod : %v", err) } nslookup := func() error { @@ -363,12 +408,12 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) { // If the coredns process was stable, this retry wouldn't be necessary. if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { - t.Errorf("nslookup failing: %v", err) + t.Errorf("failed to do nslookup on kubernetes.default: %v", err) } want := []byte("10.96.0.1") if !bytes.Contains(rr.Stdout.Bytes(), want) { - t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) + t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) } } @@ -406,29 +451,29 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Run("cache", func(t *testing.T) { t.Run("add", func(t *testing.T) { for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { - _, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - t.Errorf("Failed to cache image %q", img) + t.Errorf("failed to cache add image %q. 
args %q err %v", img, rr.Command(), err) } } }) t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { - _, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err) + t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err) } }) t.Run("list", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list")) if err != nil { - t.Errorf("cache list failed: %v", err) + t.Errorf("failed to do cache list. args %q: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { - t.Errorf("cache list did not include k8s.gcr.io/pause") + t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%s***", rr.Output()) } if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") { - t.Errorf("cache list should not include busybox:1.28.4-glibc") + t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%s***", rr.Output()) } }) @@ -438,7 +483,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed to get images by %q ssh %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "1.28.4-glibc") { - t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output()) + t.Errorf("expected '1.28.4-glibc' to be in the output but got *%s*", rr.Output()) } }) @@ -453,17 +498,17 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { // make sure the image is deleted. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err == nil { - t.Errorf("expected the image be deleted and get error but got nil error ! cmd: %q", rr.Command()) + t.Errorf("expected an error because the image should not exist, but got *nil error*! cmd: %q", rr.Command()) } // minikube cache reload. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload")) if err != nil { - t.Errorf("expected %q to run successfully but got error %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } // make sure 'cache reload' brought back the manually deleted image. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err != nil { - t.Errorf("expected to get no error for %q but got %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } }) @@ -479,7 +524,7 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { }{ {[]string{"unset", "cpus"}, "", ""}, {[]string{"get", "cpus"}, "", "Error: specified key could not be found in config"}, - {[]string{"set", "cpus", "2"}, "! These changes will take effect upon a minikube delete and then a minikube start", ""}, + {[]string{"set", "cpus", "2"}, "", "! These changes will take effect upon a minikube delete and then a minikube start"}, {[]string{"get", "cpus"}, "2", ""}, {[]string{"unset", "cpus"}, "", ""}, {[]string{"get", "cpus"}, "", "Error: specified key could not be found in config"}, @@ -489,16 +534,16 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { args := append([]string{"-p", profile, "config"}, tc.args...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil && tc.wantErr == "" { - t.Errorf("unexpected failure: %s failed: %v", rr.Args, err) + t.Errorf("failed to config minikube. args %q : %v", rr.Command(), err) } got := strings.TrimSpace(rr.Stdout.String()) if got != tc.wantOut { - t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut) + t.Errorf("expected config output for %q to be -%q- but got *%q*", rr.Command(), tc.wantOut, got) } got = strings.TrimSpace(rr.Stderr.String()) if got != tc.wantErr { - t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr) + t.Errorf("expected config error for %q to be -%q- but got *%q*", rr.Command(), tc.wantErr, got) } } } @@ -507,11 +552,11 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { func validateLogsCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} { if !strings.Contains(rr.Stdout.String(), word) { - t.Errorf("minikube logs missing expected word: %q", word) + t.Errorf("expected minikube logs to include word: -%q- but got \n***%s***\n", word, rr.Output()) } } } @@ -523,16 +568,16 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { nonexistentProfile := "lis" rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } var profileJSON map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for profileK := range profileJSON { for _, p := range profileJSON[profileK] { @@ -548,7 +593,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // List profiles rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles: args %q : %v", rr.Command(), err) } // Table output @@ -562,21 +607,20 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command()) } - }) t.Run("profile_json_output", func(t *testing.T) { // Json output rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles with json format. 
args %q: %v", rr.Command(), err) } var jsonObject map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err) } validProfiles := jsonObject["valid"] profileExists := false @@ -587,7 +631,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command()) } }) @@ -597,56 +641,60 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error).", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error)", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for hello-node pod: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do service list. args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), "hello-node") { - t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String()) + t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) + } + + if NeedsPortForward() { + t.Skipf("test is broken for port-forwarded drivers: https://github.com/kubernetes/minikube/issues/7383") } // Test --https --url mode rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get service url. args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("unexpected stderr output: %s", rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) } endpoint := strings.TrimSpace(rr.Stdout.String()) u, err := url.Parse(endpoint) if err != nil { - t.Fatalf("failed to parse %q: %v", endpoint, err) + t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err) } if u.Scheme != "https" { - t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https") + t.Errorf("expected scheme to be 'https' but got %q", u.Scheme) } // Test --format=IP rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url with custom format. 
args %q: %v", rr.Command(), err) } if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() { - t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname()) + t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Command()) } // Test a regular URLminikube rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err) } endpoint = strings.TrimSpace(rr.Stdout.String()) @@ -655,7 +703,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to parse %q: %v", endpoint, err) } if u.Scheme != "http" { - t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http") + t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme) } t.Logf("url: %s", endpoint) @@ -664,7 +712,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("get failed: %v\nresp: %v", err, resp) } if resp.StatusCode != http.StatusOK { - t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK) + t.Fatalf("expected status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) } } @@ -673,23 +721,23 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list: args %q : %v", rr.Command(), err) } for _, a := range []string{"dashboard", "ingress", "ingress-dns"} { if !strings.Contains(rr.Output(), a) { - t.Errorf("addon list expected to include %q but didn't output: %q", a, rr.Output()) + t.Errorf("expected 'addon list' output to include -%q- but got *%s*", a, rr.Output()) } } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list with json output. args %q: %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode addon list json output : %v", err) } } @@ -698,13 +746,13 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } - want := "hello\r\n" + want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run an ssh command. args %q : %v", rr.Command(), err) } if rr.Stdout.String() != want { - t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want) + t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. 
args %q", want, rr.Stdout.String(), rr.Command()) } } @@ -712,12 +760,12 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10)) if err != nil { - t.Fatalf("podwait: %v", err) + t.Fatalf("failed waiting for mysql pod: %v", err) } // Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule. @@ -725,8 +773,8 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;")) return err } - if err = retry.Expo(mysql, 5*time.Second, 180*time.Second); err != nil { - t.Errorf("mysql failing: %v", err) + if err = retry.Expo(mysql, 1*time.Second, Minutes(5)); err != nil { + t.Errorf("failed to exec 'mysql -ppassword -e show databases;': %v", err) } } @@ -750,18 +798,49 @@ func localTestCertPath() string { return filepath.Join(localpath.MiniPath(), "/certs", testCert()) } +// localEmptyCertPath is where the test file will be synced into the VM +func localEmptyCertPath() string { + return filepath.Join(localpath.MiniPath(), "/certs", fmt.Sprintf("%d_empty.pem", os.Getpid())) +} + // Copy extra file into minikube home folder for file sync test func setupFileSync(ctx context.Context, t *testing.T, profile string) { p := localSyncTestPath() t.Logf("local sync path: %s", p) err := copy.Copy("./testdata/sync.test", p) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("failed to copy ./testdata/sync.test: %v", err) } - err = copy.Copy("./testdata/minikube_test.pem", localTestCertPath()) + testPem := "./testdata/minikube_test.pem" + + // Write to a temp file for an atomic write + tmpPem := localTestCertPath() + ".pem" + if err := copy.Copy(testPem, tmpPem); err != nil { + t.Fatalf("failed to copy %s: %v", testPem, err) + } + + if err := os.Rename(tmpPem, localTestCertPath()); err != nil { + t.Fatalf("failed to rename %s: %v", tmpPem, err) + } + + want, err := os.Stat(testPem) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("stat failed: %v", err) + } + + got, err := os.Stat(localTestCertPath()) + if err != nil { + t.Fatalf("stat failed: %v", err) + } + + if want.Size() != got.Size() { + t.Errorf("%s size=%d, want %d", localTestCertPath(), got.Size(), want.Size()) + } + + // Create an empty file just to mess with people + if _, err := os.Create(localEmptyCertPath()); err != nil { + t.Fatalf("create failed: %v", err) } } @@ -773,16 +852,16 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { vp := vmSyncTestPath() t.Logf("Checking for existence of %s within VM", vp) - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("sudo cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } got := rr.Stdout.String() t.Logf("file sync test content: %s", got) expected, err := ioutil.ReadFile("./testdata/sync.test") if err != nil { - 
t.Errorf("test file not found: %v", err) + t.Errorf("failed to read test file '/testdata/sync.test' : %v", err) } if diff := cmp.Diff(string(expected), got); diff != "" { @@ -810,15 +889,15 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { } for _, vp := range paths { t.Logf("Checking for existence of %s within VM", vp) - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("sudo cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Command(), err) } // Strip carriage returned by ssh got := strings.Replace(rr.Stdout.String(), "\r", "", -1) if diff := cmp.Diff(string(want), got); diff != "" { - t.Errorf("minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) + t.Errorf("failed verify pem file. minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) } } } @@ -827,10 +906,10 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err) } - want := []byte("IP was already correctly configured") + want := []byte("No changes") if !bytes.Contains(rr.Stdout.Bytes(), want) { t.Errorf("update-context: got=%q, want=*%q*", rr.Stdout.Bytes(), want) } diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index 41344ccbc8..0b4958ad4c 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/vmpath" ) +// TestGuestEnvironment verifies files and packges installed inside minikube ISO/Base image func TestGuestEnvironment(t *testing.T) { MaybeParallel(t) @@ -37,18 +38,18 @@ func TestGuestEnvironment(t *testing.T) { args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err) } // Run as a group so that our defer doesn't happen as tests are runnings t.Run("Binaries", func(t *testing.T) { - for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService"} { + for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService", "crictl", "podman", "docker"} { pkg := pkg t.Run(pkg, func(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existence of %q binary : args %q: %v", pkg, rr.Command(), err) } }) } @@ -67,9 +68,9 @@ func TestGuestEnvironment(t *testing.T) { mount := mount t.Run(mount, func(t *testing.T) { t.Parallel() - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existence of %q mount. args %q: %v", mount, rr.Command(), err) } }) } diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index d69f1d205b..d5744eeafe 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -50,59 +50,59 @@ func TestGvisorAddon(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err) } // If it exists, include a locally built gvisor image rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", "gcr.io/k8s-minikube/gvisor-addon:2")) if err != nil { - t.Logf("%s failed: %v (won't test local image)", rr.Args, err) + t.Logf("%s failed: %v (won't test local image)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "gvisor")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Fatalf("waiting for gvisor controller to be up: %v", err) + t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err) } // Create an untrusted workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Create gvisor workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for nginx pod: %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for gvisor pod: %v", err) } // Ensure that workloads survive a restart rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed stopping minikube. args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube after a stop. 
args %q, %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Errorf("waiting for gvisor controller to be up: %v", err) + t.Errorf("failed waiting for 'gvisor controller' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'nginx' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'gvisor' pod : %v", err) } } diff --git a/test/integration/helpers.go b/test/integration/helpers.go index bac683ea87..bd1fe19525 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -63,14 +63,24 @@ func (rr RunResult) Command() string { return sb.String() } +// indentLines indents every line in a bytes.Buffer and returns it as string +func indentLines(b []byte) string { + scanner := bufio.NewScanner(bytes.NewReader(b)) + var lines string + for scanner.Scan() { + lines = lines + "\t" + scanner.Text() + "\n" + } + return lines +} + // Output returns human-readable output for an execution result func (rr RunResult) Output() string { var sb strings.Builder if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes())) + sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout.Bytes()))) } if rr.Stderr.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes())) + sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr.Bytes()))) } return sb.String() } @@ -191,13 +201,23 @@ func clusterLogs(t *testing.T, profile string) { return } + t.Logf("-----------------------post-mortem--------------------------------") t.Logf("<<< %s FAILED: start of post-mortem logs <<<", t.Name()) + t.Logf("======> post-mortem[%s]: minikube logs <======", t.Name()) + rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems")) if err != nil { t.Logf("failed logs error: %v", err) return } - t.Logf("%s logs: %s", t.Name(), rr.Stdout) + t.Logf("%s logs: %s", t.Name(), rr.Output()) + + t.Logf("======> post-mortem[%s]: disk usage <======", t.Name()) + rr, err = Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo df -h /var/lib/docker/overlay2 /var /;sudo du -hs /var/lib/docker/overlay2")) + if err != nil { + t.Logf("failed df error: %v", err) + } + t.Logf("%s df: %s", t.Name(), rr.Stdout) st = Status(context.Background(), t, Target(), profile, "APIServer") if st != state.Running.String() { @@ -205,20 +225,32 @@ func clusterLogs(t *testing.T, profile string) { return } + t.Logf("======> post-mortem[%s]: get pods <======", t.Name()) rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-A", "--show-labels")) if rerr != nil { t.Logf("%s: %v", rr.Command(), rerr) return } - t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout) + t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) + t.Logf("======> post-mortem[%s]: describe node <======", t.Name()) rr, err = Run(t, exec.Command("kubectl", "--context", profile, "describe", "node")) + if err != nil { + t.Logf("%s: %v", rr.Command(), err) + } else { + t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) + } + + t.Logf("======> post-mortem[%s]: describe pods <======", t.Name()) + rr, err = Run(t, exec.Command("kubectl", "--context", profile, 
"describe", "po", "-A")) if err != nil { t.Logf("%s: %v", rr.Command(), err) } else { t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout) } + t.Logf("<<< %s FAILED: end of post-mortem logs <<<", t.Name()) + t.Logf("---------------------/post-mortem---------------------------------") } // podStatusMsg returns a human-readable pod status, for generating debug status diff --git a/test/integration/main.go b/test/integration/main.go index 33c5e09618..2ef2d90731 100644 --- a/test/integration/main.go +++ b/test/integration/main.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "os" + "runtime" "strings" "testing" "time" @@ -27,7 +28,6 @@ import ( // General configuration: used to set the VM Driver var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start") -var defaultDriver = flag.String("expected-default-driver", "", "Expected default driver") // Flags for faster local integration testing var forceProfile = flag.String("profile", "", "force tests to run against a particular profile") @@ -61,17 +61,23 @@ func Target() string { // NoneDriver returns whether or not this test is using the none driver func NoneDriver() bool { - return strings.Contains(*startArgs, "--driver=none") + return strings.Contains(*startArgs, "--driver=none") || strings.Contains(*startArgs, "--vm-driver=none") } // HyperVDriver returns whether or not this test is using the Hyper-V driver func HyperVDriver() bool { - return strings.Contains(*startArgs, "--driver=hyperv") + return strings.Contains(*startArgs, "--driver=hyperv") || strings.Contains(*startArgs, "--vm-driver=hyperv") } -// ExpectedDefaultDriver returns the expected default driver, if any -func ExpectedDefaultDriver() string { - return *defaultDriver +// KicDriver returns whether or not this test is using the docker or podman driver +func KicDriver() bool { + return strings.Contains(*startArgs, "--driver=docker") || strings.Contains(*startArgs, "--vm-driver=docker") || strings.Contains(*startArgs, "--vm-driver=podman") || strings.Contains(*startArgs, "driver=podman") +} + +// NeedsPortForward returns access to endpoints with this driver needs port forwarding +// (Docker on non-Linux platforms requires ports to be forwarded to 127.0.0.1) +func NeedsPortForward() bool { + return KicDriver() && (runtime.GOOS == "windows" || runtime.GOOS == "darwin") } // CanCleanup returns if cleanup is allowed diff --git a/test/integration/none_test.go b/test/integration/none_test.go index e95468f86c..ed77814dee 100644 --- a/test/integration/none_test.go +++ b/test/integration/none_test.go @@ -46,22 +46,22 @@ func TestChangeNoneUser(t *testing.T) { startArgs := append([]string{"CHANGE_MINIKUBE_NONE_USER=true", Target(), "start", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } username := os.Getenv("SUDO_USER") @@ -77,7 +77,13 @@ func TestChangeNoneUser(t *testing.T) { t.Errorf("Failed to convert uid to int: %v", err) } - for _, p := range []string{localpath.MiniPath(), filepath.Join(u.HomeDir, ".kube/config")} { + // Retrieve the kube config from env + kubeConfig := os.Getenv("KUBECONFIG") + if kubeConfig == "" { + kubeConfig = filepath.Join(u.HomeDir, ".kube/config") + } + + for _, p := range []string{localpath.MiniPath(), kubeConfig} { info, err := os.Stat(p) if err != nil { t.Errorf("stat(%s): %v", p, err) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index c891309a69..593019c06f 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -71,6 +71,9 @@ func TestStartStop(t *testing.T) { "--disable-driver-mounts", "--extra-config=kubeadm.ignore-preflight-errors=SystemVerification", }}, + {"embed-certs", constants.DefaultKubernetesVersion, []string{ + "--embed-certs", + }}, } for _, tc := range tests { @@ -86,13 +89,18 @@ func TestStartStop(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"}, tc.args...) + waitFlag := "--wait=true" + if strings.Contains(tc.name, "cni") { // wait=app_running is broken for CNI https://github.com/kubernetes/minikube/issues/7354 + waitFlag = "--wait=apiserver,system_pods,default_sa" + } + + startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", waitFlag}, tc.args...) startArgs = append(startArgs, StartArgs()...) startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version)) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err) } if !strings.Contains(tc.name, "cni") { @@ -101,43 +109,43 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed stopping minikube - first stop-. 
args %q : %v", rr.Command(), err) } // The none driver never really stops if !NoneDriver() { got := Status(ctx, t, Target(), profile, "Host") if got != state.Stopped.String() { - t.Errorf("post-stop host status = %q; want = %q", got, state.Stopped) + t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got) } } // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err) } if strings.Contains(tc.name, "cni") { t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(") } else { - if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)); err != nil { - t.Fatalf("post-stop-start pod wait: %v", err) + if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(7)); err != nil { + t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err) } - if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(4)); err != nil { - t.Fatalf("post-stop-start addon wait: %v", err) + if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(9)); err != nil { + t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err) } } got := Status(ctx, t, Target(), profile, "Host") if got != state.Running.String() { - t.Errorf("post-start host status = %q; want = %q", got, state.Running) + t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got) } if !NoneDriver() { @@ -150,7 +158,7 @@ func TestStartStop(t *testing.T) { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to clean up: args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile)) @@ -158,7 +166,7 @@ func TestStartStop(t *testing.T) { t.Logf("config context error: %v (may be ok)", err) } if rr.ExitCode != 1 { - t.Errorf("wanted exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) + t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) } } }) @@ -175,22 +183,23 @@ func TestStartStopWithPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} startArgs = append(startArgs, StartArgs()...) 
k8sVersion := "v1.17.0" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Now, pull the busybox image into the VMs docker daemon image := "busybox" rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } + // Restart minikube with v1.17.3, which has a preloaded tarball startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} startArgs = append(startArgs, StartArgs()...) @@ -198,13 +207,11 @@ func TestStartStopWithPreload(t *testing.T) { startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } - - // Ensure that busybox still exists in the daemon rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), image) { t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) @@ -218,7 +225,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) { // schedule a pod to assert persistence rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // 8 minutes, because 4 is not enough for images to pull in all cases. @@ -251,21 +258,23 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Command(), err) } jv := map[string][]struct { Tags []string `json:"repoTags"` }{} err = json.Unmarshal(rr.Stdout.Bytes(), &jv) if err != nil { - t.Errorf("images unmarshal: %v", err) + t.Errorf("failed to decode images json %v. 
output: %s", err, rr.Output()) } - gotImages := []string{} + found := map[string]bool{} for _, img := range jv["images"] { for _, i := range img.Tags { + // Remove container-specific prefixes for naming consistency + i = strings.TrimPrefix(i, "docker.io/") + i = strings.TrimPrefix(i, "localhost/") if defaultImage(i) { - // Remove docker.io for naming consistency between container runtimes - gotImages = append(gotImages, strings.TrimPrefix(i, "docker.io/")) + found[i] = true } else { t.Logf("Found non-minikube image: %s", i) } @@ -273,7 +282,11 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version } want, err := images.Kubeadm("", version) if err != nil { - t.Errorf("kubeadm images: %v", version) + t.Errorf("failed to get kubeadm images for %s : %v", version, err) + } + gotImages := []string{} + for k := range found { + gotImages = append(gotImages, k) } sort.Strings(want) sort.Strings(gotImages) @@ -288,7 +301,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got := Status(ctx, t, Target(), profile, "APIServer") @@ -303,7 +316,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got = Status(ctx, t, Target(), profile, "APIServer") diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index e9c655a9b4..fa6a4d4653 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -75,29 +75,30 @@ func TestVersionUpgrade(t *testing.T) { return err } - // Retry to allow flakiness for the previous release - if err := retry.Expo(r, 1*time.Second, Minutes(30), 3); err != nil { + // Retry up to two times, to allow flakiness for the previous release + if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil { t.Fatalf("release start failed: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}")) if err != nil { t.Logf("status error: %v (may be ok)", err) } + got := strings.TrimSpace(rr.Stdout.String()) if got != state.Stopped.String() { - t.Errorf("status = %q; want = %q", got, state.Stopped.String()) + t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube HEAD with newest k8s version. 
args: %s : %v", rr.Command(), err) } s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json")) @@ -119,20 +120,16 @@ func TestVersionUpgrade(t *testing.T) { t.Fatalf("expected server version %s is not the same with latest version %s", cv.ServerVersion.GitVersion, constants.NewestKubernetesVersion) } + t.Logf("Attempting to downgrade Kubernetes (should fail)") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) - rr = &RunResult{} - r = func() error { - rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...)) - return err - } - - if err := retry.Expo(r, 1*time.Second, Minutes(30), 3); err == nil { - t.Fatalf("downgrading kubernetes should not be allowed: %v", err) + if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { + t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) } + t.Logf("Attempting restart after unsuccessful downgrade") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("start after failed upgrade: %v", err) } } diff --git a/third_party/go9p/srv_pipe_freebsd.go b/third_party/go9p/srv_pipe_freebsd.go new file mode 100644 index 0000000000..453c52fec1 --- /dev/null +++ b/third_party/go9p/srv_pipe_freebsd.go @@ -0,0 +1,41 @@ +// Copyright 2009 The go9p Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package go9p + +import ( + "fmt" + "os" + "syscall" +) + +func (dir *pipeDir) dotu(path string, d os.FileInfo, upool Users, sysMode *syscall.Stat_t) { + u := upool.Uid2User(int(sysMode.Uid)) + g := upool.Gid2Group(int(sysMode.Gid)) + dir.Uid = u.Name() + if dir.Uid == "" { + dir.Uid = "none" + } + + dir.Gid = g.Name() + if dir.Gid == "" { + dir.Gid = "none" + } + dir.Muid = "none" + dir.Ext = "" + dir.Uidnum = uint32(u.Id()) + dir.Gidnum = uint32(g.Id()) + dir.Muidnum = NOUID + if d.Mode()&os.ModeSymlink != 0 { + var err error + dir.Ext, err = os.Readlink(path) + if err != nil { + dir.Ext = "" + } + } else if isBlock(d) { + dir.Ext = fmt.Sprintf("b %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } else if isChar(d) { + dir.Ext = fmt.Sprintf("c %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } +} diff --git a/third_party/go9p/ufs_freebsd.go b/third_party/go9p/ufs_freebsd.go new file mode 100644 index 0000000000..da9a10fae2 --- /dev/null +++ b/third_party/go9p/ufs_freebsd.go @@ -0,0 +1,239 @@ +// Copyright 2009 The go9p Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package go9p + +import ( + "fmt" + "os" + "os/user" + "path" + "strconv" + "strings" + "syscall" + "time" +) + +func atime(stat *syscall.Stat_t) time.Time { + return time.Unix(stat.Atimespec.Unix()) +} + +// IsBlock reports if the file is a block device +func isBlock(d os.FileInfo) bool { + stat := d.Sys().(*syscall.Stat_t) + return (stat.Mode & syscall.S_IFMT) == syscall.S_IFBLK +} + +// IsChar reports if the file is a character device +func isChar(d os.FileInfo) bool { + stat := d.Sys().(*syscall.Stat_t) + return (stat.Mode & syscall.S_IFMT) == syscall.S_IFCHR +} + +func dir2Qid(d os.FileInfo) *Qid { + var qid Qid + + qid.Path = d.Sys().(*syscall.Stat_t).Ino + qid.Version = uint32(d.ModTime().UnixNano() / 1000000) + qid.Type = dir2QidType(d) + + return &qid +} + +func dir2Dir(path string, d os.FileInfo, dotu bool, upool Users) (*Dir, error) { + if r := recover(); r != nil { + fmt.Print("stat failed: ", r) + return nil, &os.PathError{"dir2Dir", path, nil} + } + sysif := d.Sys() + if sysif == nil { + return nil, &os.PathError{"dir2Dir: sysif is nil", path, nil} + } + sysMode := sysif.(*syscall.Stat_t) + + dir := new(ufsDir) + dir.Qid = *dir2Qid(d) + dir.Mode = dir2Npmode(d, dotu) + dir.Atime = uint32(0 /*atime(sysMode).Unix()*/) + dir.Mtime = uint32(d.ModTime().Unix()) + dir.Length = uint64(d.Size()) + dir.Name = path[strings.LastIndex(path, "/")+1:] + + if dotu { + dir.dotu(path, d, upool, sysMode) + return &dir.Dir, nil + } + + unixUid := int(sysMode.Uid) + unixGid := int(sysMode.Gid) + dir.Uid = strconv.Itoa(unixUid) + dir.Gid = strconv.Itoa(unixGid) + + // BUG(akumar): LookupId will never find names for + // groups, as it only operates on user ids. + u, err := user.LookupId(dir.Uid) + if err == nil { + dir.Uid = u.Username + } + g, err := user.LookupId(dir.Gid) + if err == nil { + dir.Gid = g.Username + } + + /* For Akaros, we use the Muid as the link value. */ + if *Akaros && (d.Mode()&os.ModeSymlink != 0) { + dir.Muid, err = os.Readlink(path) + if err == nil { + dir.Mode |= DMSYMLINK + } + } + return &dir.Dir, nil +} + +func (dir *ufsDir) dotu(path string, d os.FileInfo, upool Users, sysMode *syscall.Stat_t) { + u := upool.Uid2User(int(sysMode.Uid)) + g := upool.Gid2Group(int(sysMode.Gid)) + dir.Uid = u.Name() + if dir.Uid == "" { + dir.Uid = "none" + } + + dir.Gid = g.Name() + if dir.Gid == "" { + dir.Gid = "none" + } + dir.Muid = "none" + dir.Ext = "" + dir.Uidnum = uint32(u.Id()) + dir.Gidnum = uint32(g.Id()) + dir.Muidnum = NOUID + if d.Mode()&os.ModeSymlink != 0 { + var err error + dir.Ext, err = os.Readlink(path) + if err != nil { + dir.Ext = "" + } + } else if isBlock(d) { + dir.Ext = fmt.Sprintf("b %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } else if isChar(d) { + dir.Ext = fmt.Sprintf("c %d %d", sysMode.Rdev>>24, sysMode.Rdev&0xFFFFFF) + } +} + +func (u *Ufs) Wstat(req *SrvReq) { + fid := req.Fid.Aux.(*ufsFid) + err := fid.stat() + if err != nil { + req.RespondError(err) + return + } + + dir := &req.Tc.Dir + if dir.Mode != 0xFFFFFFFF { + mode := dir.Mode & 0777 + if req.Conn.Dotu { + if dir.Mode&DMSETUID > 0 { + mode |= syscall.S_ISUID + } + if dir.Mode&DMSETGID > 0 { + mode |= syscall.S_ISGID + } + } + e := os.Chmod(fid.path, os.FileMode(mode)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + uid, gid := NOUID, NOUID + if req.Conn.Dotu { + uid = dir.Uidnum + gid = dir.Gidnum + } + + // Try to find local uid, gid by name. 
+ if (dir.Uid != "" || dir.Gid != "") && !req.Conn.Dotu { + uid, err = lookup(dir.Uid, false) + if err != nil { + req.RespondError(err) + return + } + + // BUG(akumar): Lookup will never find gids + // corresponding to group names, because + // it only operates on user names. + gid, err = lookup(dir.Gid, true) + if err != nil { + req.RespondError(err) + return + } + } + + if uid != NOUID || gid != NOUID { + e := os.Chown(fid.path, int(uid), int(gid)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + if dir.Name != "" { + fmt.Printf("Rename %s to %s\n", fid.path, dir.Name) + // if first char is / it is relative to root, else relative to + // cwd. + var destpath string + if dir.Name[0] == '/' { + destpath = path.Join(u.Root, dir.Name) + fmt.Printf("/ results in %s\n", destpath) + } else { + fiddir, _ := path.Split(fid.path) + destpath = path.Join(fiddir, dir.Name) + fmt.Printf("rel results in %s\n", destpath) + } + err := os.Rename(fid.path, destpath) + fmt.Printf("rename %s to %s gets %v\n", fid.path, destpath, err) + if err != nil { + req.RespondError(toError(err)) + return + } + fid.path = destpath + } + + if dir.Length != 0xFFFFFFFFFFFFFFFF { + e := os.Truncate(fid.path, int64(dir.Length)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + // If either mtime or atime need to be changed, then + // we must change both. + if dir.Mtime != ^uint32(0) || dir.Atime != ^uint32(0) { + mt, at := time.Unix(int64(dir.Mtime), 0), time.Unix(int64(dir.Atime), 0) + if cmt, cat := (dir.Mtime == ^uint32(0)), (dir.Atime == ^uint32(0)); cmt || cat { + st, e := os.Stat(fid.path) + if e != nil { + req.RespondError(toError(e)) + return + } + switch cmt { + case true: + mt = st.ModTime() + default: + // at = time.Time(0)//atime(st.Sys().(*syscall.Stat_t)) + } + } + // macOS filesystem st_mtime values are only accurate to the second + // this ensures, 9p will only write mtime to the second #1375 + e := os.Chtimes(fid.path, at.Truncate(time.Second), mt.Truncate(time.Second)) + if e != nil { + req.RespondError(toError(e)) + return + } + } + + req.RespondRwstat() +} diff --git a/translations/de.json b/translations/de.json index 6f179495a3..ab60064092 100644 --- a/translations/de.json +++ b/translations/de.json @@ -1,19 +1,15 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", - "\"{{.name}}\" profile does not exist": "", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. 
You may need to select --image-repository, or use a proxy.": "", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", @@ -32,30 +28,31 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Alternatives Bild-Repository zum Abrufen von Docker-Images. Dies ist hilfreich, wenn Sie nur eingeschränkten Zugriff auf gcr.io haben. Stellen Sie \\\"auto\\\" ein, dann wählt minikube eins für sie aus. Nutzer vom chinesischen Festland können einen lokalen gcr.io-Mirror wie registry.cn-hangzhou.aliyuncs.com/google_containers verwenden.", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Größe des der minikube-VM zugewiesenen Arbeitsspeichers (Format: \u003cNummer\u003e [\u003cEinheit\u003e], wobei Einheit = b, k, m oder g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the {{.driver}} driver": "", "Automatically selected the {{.driver}} driver. 
Other choices: {{.alternates}}": "", "Available Commands": "", "Basic Commands:": "", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", - "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "", - "Check that the provided apiserver flags are valid": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -66,9 +63,9 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Ländercode des zu verwendenden Image Mirror. Lassen Sie dieses Feld leer, um den globalen zu verwenden. Nutzer vom chinesischen Festland stellen cn ein.", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Bereitstellung {{.name}} wird erstellt...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", @@ -97,10 +94,10 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "Fertig! 
kubectl ist jetzt für die Verwendung von \"{{.name}}\" konfiguriert", "Download complete!": "Download abgeschlossen!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -109,7 +106,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Experimentellen NVIDIA GPU-Support in minikube aktivieren", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Host Resolver für NAT DNS-Anfragen aktivieren (nur Virtualbox-Treiber)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Proxy für NAT-DNS-Anforderungen aktivieren (nur Virtualbox-Treiber)", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Standard-CNI-Plugin-in (/etc/cni/net.d/k8s.conf) aktivieren. Wird in Verbindung mit \"--network-plugin = cni\" verwendet", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", @@ -122,7 +118,6 @@ "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", "Environment variables to pass to the Docker daemon. (format: key=value)": "Umgebungsvariablen, die an den Docker-Daemon übergeben werden. 
(Format: Schlüssel = Wert)", - "Error adding node to cluster": "", "Error checking driver version: {{.error}}": "Fehler beim Prüfen der Treiberversion: {{.error}}", "Error creating minikube directory": "", "Error creating view template": "", @@ -131,45 +126,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "Fehler beim Laden des Profils {{.name}}: {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Fehler beim Parsen der Driver-Version: {{.error}}", "Error parsing minikube version: {{.error}}": "Fehler beim Parsen der minikube-Version: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Fehler: Sie haben Kubernetes v{{.new}} ausgewählt, aber auf dem vorhandenen Cluster für Ihr Profil wird Kubernetes v{{.old}} ausgeführt. Zerstörungsfreie Downgrades werden nicht unterstützt. 
Sie können jedoch mit einer der folgenden Optionen fortfahren:\n* Erstellen Sie den Cluster mit Kubernetes v{{.new}} neu: Führen Sie \"minikube delete {{.profile}}\" und dann \"minikube start {{.profile}} - kubernetes-version = {{.new}}\" aus.\n* Erstellen Sie einen zweiten Cluster mit Kubernetes v{{.new}}: Führen Sie \"minikube start -p \u003cnew name\u003e --kubernetes-version = {{.new}}\" aus.\n* Verwenden Sie den vorhandenen Cluster mit Kubernetes v {{.old}} oder höher: Führen Sie \"minikube start {{.profile}} --kubernetes-version = {{.old}}\" aus.", - "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Wird beendet", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -177,21 +156,21 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Fehler beim Ändern der Berechtigungen für {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "Fehler beim Löschen des Clusters: {{.error}}", "Failed to delete cluster: {{.error}}__1": "Fehler beim Löschen des Clusters: {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", @@ -202,29 +181,31 @@ "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY Env konnte nicht festgelegt werden. Benutzen Sie `export NO_PROXY = $ NO_PROXY, {{. Ip}}", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", - "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Für beste Ergebnisse installieren Sie kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/__1": "Für beste Ergebnisse installieren Sie kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For more information, see:": "Weitere Informationen:", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "minikube zwingen, möglicherweise gefährliche Operationen durchzuführen", "Found network options:": "Gefundene Netzwerkoptionen:", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +216,8 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. 
Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,8 +234,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Unsichere Docker-Registrys, die an den Docker-Daemon übergeben werden. Der CIDR-Bereich des Standarddienstes wird automatisch hinzugefügt.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -267,7 +251,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Lokale Ordner, die über NFS-Bereitstellungen für Gast freigegeben werden (nur Hyperkit-Treiber)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Speicherort des VPNKit-Sockets, der für das Netzwerk verwendet wird. Wenn leer, wird Hyperkit VPNKitSock deaktiviert. Wenn 'auto' die Docker for Mac VPNKit-Verbindung verwendet, wird andernfalls der angegebene VSock verwendet (nur Hyperkit-Treiber).", "Location of the minikube iso": "Speicherort der minikube-ISO", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -275,6 +259,7 @@ "Minikube is a tool for managing local Kubernetes clusters.": "", "Modify minikube config": "", "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "", @@ -284,18 +269,23 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Keines der bekannten Repositories an Ihrem Standort ist zugänglich. {{.image_repository_name}} wird als Fallback verwendet.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Keines der bekannten Repositories ist zugänglich. Erwägen Sie, ein alternatives Image-Repository mit der Kennzeichnung --image-repository anzugeben", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Anzahl der CPUs, die der minikube-VM zugeordnet sind", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -314,48 +304,61 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Aktualisieren Sie '{{.driver_executable}}'. 
{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "{{.profile_name}}\" wird über SSH ausgeschaltet...", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "Vorbereiten von Kubernetes {{.k8sVersion}} auf {{.runtime}} {{.runtimeVersion}}...", "Print current and latest version number": "", + "Print just the version number.": "", "Print the version of minikube": "", "Print the version of minikube.": "", "Problems detected in {{.entry}}:": "", "Problems detected in {{.name}}:": "", "Profile gets or sets the current minikube profile": "", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Geben Sie die VM-UUID an, um die MAC-Adresse wiederherzustellen (nur Hyperkit-Treiber)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Registry-Mirror, die an den Docker-Daemon übergeben werden", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "", "Relaunching Kubernetes using {{.bootstrapper}} ...": "Kubernetes mit {{.bootstrapper}} neu starten...", + "Remove the incompatible --docker-opt flag if one was provided": "", "Removed all traces of the \"{{.name}}\" cluster.": "", "Removing {{.directory}} ...": "{{.directory}} wird entfernt...", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "Die angeforderte Festplattengröße {{.requested_size}} liegt unter dem Mindestwert von {{.minimum_size}}.", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "Die angeforderte Speicherzuordnung ({{.memory}} MB) ist geringer als die Standardspeicherzuordnung von {{.default_memorysize}} MB. Beachten Sie, dass minikube möglicherweise nicht richtig funktioniert oder unerwartet abstürzt.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "Die angeforderte Speicherzuweisung {{.requested_size}} liegt unter dem zulässigen Mindestwert von {{.minimum_size}}.", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -370,6 +373,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "Leider wird der Parameter kubeadm.{{.parameter_name}} momentan von --extra-config nicht unterstützt.", @@ -381,8 +385,10 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Startet einen lokalen Kubernetes-Cluster", "Starts a node.": "", @@ -395,16 +401,16 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "", - "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Der Treiber \"{{.driver_name}}\" benötigt Root-Rechte. Führen Sie minikube aus mit 'sudo minikube --vm-driver = {{. Driver_name}}.", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", "The \"{{.name}}\" cluster has been deleted.": "Der Cluster \"{{.name}}\" wurde gelöscht.", "The \"{{.name}}\" cluster has been deleted.__1": "Der Cluster \"{{.name}}\" wurde gelöscht.", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The 'none' driver provides limited isolation and may reduce system security and reliability.": "Der Treiber \"Keine\" bietet eine eingeschränkte Isolation und beeinträchtigt möglicherweise Sicherheit und Zuverlässigkeit des Systems.", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", @@ -420,19 +426,23 @@ "The VM driver exited with an error, and may be corrupt. 
Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "Der Überwachungsport des API-Servers", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Der API-Servername, der im generierten Zertifikat für Kubernetes verwendet wird. Damit kann der API-Server von außerhalb des Computers verfügbar gemacht werden.", "The argument to pass the minikube mount command on start": "Das Argument, um den Bereitstellungsbefehl für minikube beim Start zu übergeben", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "Der DNS-Domänenname des Clusters, der im Kubernetes-Cluster verwendet wird", "The container runtime to be used (docker, crio, containerd)": "Die zu verwendende Container-Laufzeit (Docker, Crio, Containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "Der zu verwendende Cri-Socket-Pfad", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Der Treiber '{{.driver}}' wird auf {{.os}} nicht unterstützt", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Der Name des virtuellen Hyperv-Switch. Standardmäßig zuerst gefunden. (nur Hyperv-Treiber)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -444,10 +454,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -456,46 +469,63 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "Der Treiber {{.driver_name}} sollte nicht mit Root-Rechten verwendet werden.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Es gibt eine neue Version für '{{.driver_executable}}'. Bitte erwägen Sie ein Upgrade. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Dies kann auch automatisch erfolgen, indem Sie die env var CHANGE_MINIKUBE_NONE_USER = true setzen", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. 
Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Dadurch wird der vorhandene Kubectl-Kontext beibehalten und ein minikube-Kontext erstellt.", "This will start the mount daemon and automatically mount files into minikube": "Dadurch wird der Mount-Daemon gestartet und die Dateien werden automatisch in minikube geladen", "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Tipp: Um diesen Root-Cluster zu entfernen, führen Sie Folgendes aus: sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Verwenden Sie zum Herstellen einer Verbindung zu diesem Cluster: kubectl --context = {{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Möglicherweise müssen Sie Kubectl- oder minikube-Befehle verschieben, um sie als eigenen Nutzer zu verwenden. Um beispielsweise Ihre eigenen Einstellungen zu überschreiben, führen Sie aus:", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "", "Unable to bind flags": "", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "Bootstrapper kann nicht abgerufen werden: {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "Zwischengespeicherte Bilder können nicht aus der Konfigurationsdatei geladen werden.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "Konfig kann nicht geladen werden: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "\"{{.Kubernetes_version}}\" kann nicht geparst werden: {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to pull images, which may be OK: {{.error}}": "Bilder können nicht abgerufen werden, was möglicherweise kein Problem darstellt: {{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -506,6 +536,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Upgrade von Kubernetes {{.old}} auf {{.new}}", "Usage": "", @@ -526,11 +557,10 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Verwenden des Image-Repositorys {{.name}}", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "VM-Treiber ist einer von: %v", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -541,62 +571,63 @@ "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. 
Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Wait failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Warten Sie vor dem Beenden, bis die Kerndienste von Kubernetes fehlerfrei arbeiten", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Als Root für die NFS-Freigaben wird standardmäßig /nfsshares verwendet (nur Hyperkit-Treiber)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Sie scheinen einen Proxy zu verwenden, aber Ihre NO_PROXY-Umgebung enthält keine minikube-IP ({{.ip_address}}). Weitere Informationen finden Sie unter {{.documentation_url}}", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Möglicherweise müssen Sie die VM \"{{.name}}\" manuell von Ihrem Hypervisor entfernen", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "Konfiguration von Kubectl und minikube wird in {{.home_folder}} gespeichert", - "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "", @@ -605,14 +636,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -636,18 +669,19 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", "{{.name}}\" profile does not exist": "Profil \"{{.name}}\" existiert nicht", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} auf {{.platform}}", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/es.json b/translations/es.json index 53f8e3f3f5..fe61127b7e 100644 --- a/translations/es.json +++ b/translations/es.json @@ -1,19 +1,16 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist": "El perfil \"{{.name}}\" no existe", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", @@ -32,30 +29,31 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Repositorio de imágenes alternativo del que extraer imágenes de Docker. Puedes usarlo cuando tengas acceso limitado a gcr.io. Si quieres que minikube elija uno por ti, solo tienes que definir el valor como \"auto\". 
Los usuarios de China continental pueden utilizar réplicas locales de gcr.io, como registry.cn-hangzhou.aliyuncs.com/google_containers", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Cantidad de RAM asignada a la VM de minikube (formato: \u003cnúmero\u003e[\u003cunidad\u003e], donde unidad = b, k, m o g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the {{.driver}} driver": "", "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "", "Available Commands": "", "Basic Commands:": "", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", - "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "", - "Check that the provided apiserver flags are valid": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -66,9 +64,9 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Código de país de la réplica de imagen que quieras utilizar. Déjalo en blanco para usar el valor global. 
Los usuarios de China continental deben definirlo como cn.", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Creando la activación {{.name}}...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", @@ -97,10 +95,10 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "¡Listo! Se ha configurado kubectl para que use \"{{.name}}", "Download complete!": "Se ha completado la descarga", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -109,7 +107,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Permite habilitar la compatibilidad experimental con GPUs NVIDIA en minikube", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Permite habilitar la resolución del host en las solicitudes DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Permite habilitar el uso de proxies en las solicitudes de DNS con traducción de direcciones de red (NAT) aplicada (solo con el controlador de Virtualbox)", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Permite habilitar el complemento CNI predeterminado (/etc/cni/net.d/k8s.conf). Se utiliza junto con \"--network-plugin=cni", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", @@ -122,7 +119,6 @@ "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", "Environment variables to pass to the Docker daemon. (format: key=value)": "Variables de entorno que se transferirán al daemon de Docker. 
Formato: clave=valor", - "Error adding node to cluster": "", "Error checking driver version: {{.error}}": "No se ha podido comprobar la versión del controlador: {{.error}}", "Error creating minikube directory": "", "Error creating view template": "", @@ -131,45 +127,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "No se ha podido cargar el perfil {{.name}}: {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "No se ha podido analizar la versión de Driver: {{.error}}", "Error parsing minikube version: {{.error}}": "No se ha podido analizar la versión de minikube: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Error: Has seleccionado Kubernetes {{.new}}, pero el clúster de tu perfil utiliza la versión {{.old}}. 
No se puede cambiar a una versión inferior sin eliminar todos los datos y recursos pertinentes, pero dispones de las siguientes opciones para continuar con la operación:\n* Volver a crear el clúster con Kubernetes {{.new}}: ejecuta \"minikube delete {{.profile}}\" y, luego, \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Crear un segundo clúster con Kubernetes {{.new}}: ejecuta \"minikube start -p \u003cnuevo nombre\u003e --kubernetes-version={{.new}}\"\n* Reutilizar el clúster actual con Kubernetes {{.old}} o una versión posterior: ejecuta \"minikube start {{.profile}} --kubernetes-version={{.old}}", - "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Saliendo", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -177,21 +157,21 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "No se han podido cambiar los permisos de {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "No se ha podido eliminar el clúster: {{.error}}", "Failed to delete cluster: {{.error}}__1": "No se ha podido eliminar el clúster: {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", @@ -202,29 +182,31 @@ "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "No se ha podido definir la variable de entorno NO_PROXY. Utiliza export NO_PROXY=$NO_PROXY,{{.ip}}", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", - "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Para disfrutar de un funcionamiento óptimo, instala kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/__1": "Para disfrutar de un funcionamiento óptimo, instala kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For more information, see:": "Para obtener más información, consulta lo siguiente:", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "Permite forzar minikube para que realice operaciones potencialmente peligrosas", "Found network options:": "Se han encontrado las siguientes opciones de red:", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +217,8 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. 
Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,8 +235,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registros de Docker que no son seguros y que se transferirán al daemon de Docker. Se añadirá automáticamente el intervalo CIDR de servicio predeterminado.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -267,7 +252,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Carpetas locales que se compartirán con el invitado mediante activaciones de NFS (solo con el controlador de hyperkit)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Ubicación del socket de VPNKit que se utiliza para ofrecer funciones de red. Si se deja en blanco, se inhabilita VPNKitSock de Hyperkit; si se define como \"auto\", se utiliza Docker para las conexiones de VPNKit en Mac. Con cualquier otro valor, se utiliza el VSock especificado (solo con el controlador de hyperkit)", "Location of the minikube iso": "Ubicación de la ISO de minikube", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -275,6 +260,7 @@ "Minikube is a tool for managing local Kubernetes clusters.": "", "Modify minikube config": "", "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "", @@ -284,18 +270,23 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "No se puede acceder a ninguno de los repositorios conocidos de tu ubicación. Se utilizará {{.image_repository_name}} como alternativa.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "No se puede acceder a ninguno de los repositorios conocidos. Plantéate indicar un repositorio de imágenes alternativo con la marca --image-repository.", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Número de CPU asignadas a la VM de minikube", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -314,48 +305,61 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Actualiza \"{{.driver_executable}}\". 
{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "Apagando \"{{.profile_name}}\" mediante SSH...", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "Preparando Kubernetes {{.k8sVersion}} en {{.runtime}} {{.runtimeVersion}}...", "Print current and latest version number": "", + "Print just the version number.": "", "Print the version of minikube": "", "Print the version of minikube.": "", "Problems detected in {{.entry}}:": "", "Problems detected in {{.name}}:": "", "Profile gets or sets the current minikube profile": "", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Permite especificar un UUID de VM para restaurar la dirección MAC (solo con el controlador de hyperkit)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Réplicas del registro que se transferirán al daemon de Docker", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "", "Relaunching Kubernetes using {{.bootstrapper}} ...": "Reiniciando Kubernetes con {{.bootstrapper}}...", + "Remove the incompatible --docker-opt flag if one was provided": "", "Removed all traces of the \"{{.name}}\" cluster.": "", "Removing {{.directory}} ...": "Eliminando {{.directory}}...", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "El tamaño de disco de {{.requested_size}} que se ha solicitado es inferior al tamaño mínimo de {{.minimum_size}}", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "El valor de la asignación de memoria ({{.memory}} MB) solicitada es inferior a la asignación de memoria predeterminada de {{.default_memorysize}} MB. minikube podría no funcionar correctamente o fallar de manera inesperada.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "El valor de la asignación de memoria de {{.requested_size}} solicitada es inferior al valor mínimo de {{.minimum_size}}", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -370,6 +374,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "De momento, --extra-config no admite el parámetro kubeadm.{{.parameter_name}}", @@ -381,8 +386,10 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Inicia un clúster de Kubernetes local", "Starts a node.": "", @@ -395,16 +402,16 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "", - "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "El controlador \"{{.driver_name}}\" requiere privilegios de raíz. Ejecuta minikube mediante sudo minikube --vm-driver={{.driver_name}}", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", "The \"{{.name}}\" cluster has been deleted.": "Se ha eliminado el clúster \"{{.name}}\".", "The \"{{.name}}\" cluster has been deleted.__1": "Se ha eliminado el clúster \"{{.name}}\".", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The 'none' driver provides limited isolation and may reduce system security and reliability.": "La opción de controlador \"none\" proporciona un aislamiento limitado y puede reducir la seguridad y la fiabilidad del sistema.", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", @@ -420,19 +427,23 @@ "The VM driver exited with an error, and may be corrupt. 
Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "El puerto de escucha del apiserver", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "El nombre del apiserver del certificado de Kubernetes generado. Se puede utilizar para que sea posible acceder al apiserver desde fuera de la máquina", "The argument to pass the minikube mount command on start": "El argumento para ejecutar el comando de activación de minikube durante el inicio", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "El nombre de dominio de DNS del clúster de Kubernetes", "The container runtime to be used (docker, crio, containerd)": "El entorno de ejecución del contenedor (Docker, cri-o, containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "La ruta del socket de cri", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "El controlador \"{{.driver}}\" no se puede utilizar en {{.os}}", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "El nombre del conmutador virtual de hyperv. El valor predeterminado será el primer nombre que se encuentre (solo con el controlador de hyperv).", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -444,10 +455,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -456,46 +470,63 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "El controlador {{.driver_name}} no se debe utilizar con privilegios de raíz.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Hay una nueva versión de \"{{.driver_executable}}\". Te recomendamos que realices la actualización. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "El proceso se puede automatizar si se define la variable de entorno CHANGE_MINIKUBE_NONE_USER=true", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. 
Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Se conservará el contexto de kubectl actual y se creará uno de minikube.", "This will start the mount daemon and automatically mount files into minikube": "Se iniciará el daemon de activación y se activarán automáticamente los archivos en minikube", "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Para eliminar este clúster de raíz, ejecuta: sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "Para conectarte a este clúster, usa: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Para conectarte a este clúster, usa: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Para usar comandos de kubectl o minikube como tu propio usuario, puede que debas reubicarlos. Por ejemplo, para sobrescribir tu configuración, ejecuta:", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "", "Unable to bind flags": "", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "No se ha podido obtener el programa previo: {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "No se han podido cargar las imágenes almacenadas en caché del archivo de configuración.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "No se ha podido cargar la configuración: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "No se ha podido analizar la versión \"{{.kubernetes_version}}\": {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to pull images, which may be OK: {{.error}}": "No se ha podido recuperar imágenes, que podrían estar en buen estado: {{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -506,6 +537,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Actualizando la versión de Kubernetes de {{.old}} a {{.new}}", "Usage": "", @@ -526,11 +558,10 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Utilizando el repositorio de imágenes {{.name}}", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "El controlador de la VM es uno de los siguientes: %v", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -541,62 +572,63 @@ "VirtualBox is broken. 
Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Wait failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Espera hasta que los servicios principales de Kubernetes se encuentren en buen estado antes de salir", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Ruta en la raíz de los recursos compartidos de NFS. Su valor predeterminado es /nfsshares (solo con el controlador de hyperkit)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "Parece que estás usando un proxy, pero tu entorno NO_PROXY no incluye la dirección IP de minikube ({{.ip_address}}). Consulta {{.documentation_url}} para obtener más información", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Puede que tengas que retirar manualmente la VM \"{{.name}}\" de tu hipervisor", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "La configuración de kubectl y de minikube se almacenará en {{.home_folder}}", - "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "", @@ -605,14 +637,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -636,17 +670,18 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} en {{.platform}}", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/fr.json b/translations/fr.json index 7dd081da10..1272546bb8 100644 --- a/translations/fr.json +++ b/translations/fr.json @@ -1,20 +1,15 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", - "\"{{.name}}\" profile does not exist": "Le profil \"{{.name}}\" n'existe pas.", - "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", - "\"{{.profile_name}}\" host does not exist, unable to show an IP": "", - "\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" est arrêté.", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "\"{{.machineName}}\" n'existe pas, rien à arrêter.", + "\"{{.name}}\" profile does not exist, trying anyways.": "Le profil \"{{.name}}\" n'existe pas, tentative de suppression quand même.", "'none' driver does not support 'minikube docker-env' command": "", "'none' driver does not support 'minikube mount' command": "", "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", @@ -33,30 +28,31 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Autre dépôt d'images d'où extraire des images Docker. Il peut être utilisé en cas d'accès limité à gcr.io. Définissez-le sur \\\"auto\\\" pour permettre à minikube de choisir la valeur à votre place.
Pour les utilisateurs situés en Chine continentale, vous pouvez utiliser des miroirs gcr.io locaux tels que registry.cn-hangzhou.aliyuncs.com/google_containers.", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Quantité de mémoire RAM allouée à la VM minikube (format : \u003cnombre\u003e[\u003cunité\u003e], où \"unité\" = b, k, m ou g).", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", - "Automatically selected the {{.driver}} driver": "", - "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", + "Automatically selected the {{.driver}} driver": "Choix automatique du pilote {{.driver}}", + "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "Choix automatique du pilote {{.driver}}. Autres choix: {{.alternates}}", "Available Commands": "", "Basic Commands:": "", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", - "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "", - "Check that the provided apiserver flags are valid": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -68,16 +64,15 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Code pays du miroir d'images à utiliser. Laissez ce paramètre vide pour utiliser le miroir international.
Pour les utilisateurs situés en Chine continentale, définissez sa valeur sur \"cn\".", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "Création de l'installation {{.name}}…", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création d'une VM {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création de {{.machine_type}} {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "", "Default user id used for the mount": "", "Delete an image from the local cache.": "", "Deletes a local kubernetes cluster": "", - "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "", - "Deletes a local kubernetes cluster. This command deletes the VM, and removes all associated files.": "Supprime le cluster Kubernetes local. Cette commande supprime la VM ainsi que tous les fichiers associés.", + "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "Supprime le cluster Kubernetes local. Cette commande supprime la VM ainsi que tous les fichiers associés.", "Deletes a node from a cluster.": "", "Deleting \"{{.profile_name}}\" in {{.driver_name}} ...": "Suppression de \"{{.profile_name}}\" dans {{.driver_name}}...", "Deleting node {{.name}} from cluster {{.cluster}}": "Suppression de noeuds {{.name}} de cluster {{.cluster}}", @@ -95,13 +90,12 @@ "Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "", "Docs have been saved at - {{.path}}": "", "Documentation: {{.url}}": "", - "Done! kubectl is now configured to use \"{{.name}}": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".", - "Done! kubectl is now configured to use \"{{.name}}\"": "", + "Done! kubectl is now configured to use \"{{.name}}\"": "Terminé ! kubectl est maintenant configuré pour utiliser \"{{.name}}\".", "Download complete!": "Téléchargement terminé !", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -110,20 +104,18 @@ "Enable addons. 
see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "Active l'assistance expérimentale du GPU NVIDIA dans minikube.", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "Active le résolveur d'hôte pour les requêtes DNS NAT (pilote VirtualBox uniquement).", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "Active le proxy pour les requêtes DNS NAT (pilote VirtualBox uniquement).", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "Active le plug-in CNI par défaut (/etc/cni/net.d/k8s.conf). Utilisé en association avec \\\"--network-plugin=cni\\\".", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", "Enabling '{{.name}}' returned an error: {{.error}}": "", - "Enabling addons: {{.addons}}": "", + "Enabling addons: {{.addons}}": "Installation des addons: {{.addons}}", "Enabling dashboard ...": "", "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker": "", "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. Alternatively, select another value for --driver": "", "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", "Environment variables to pass to the Docker daemon. 
(format: key=value)": "Variables d'environment à transmettre au daemon Docker (format : clé = valeur).", - "Error adding node to cluster": "", "Error checking driver version: {{.error}}": "Erreur lors de la vérification de la version du driver : {{.error}}", "Error creating minikube directory": "", "Error creating view template": "", @@ -132,45 +124,29 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error loading profile {{.name}}: {{.error}}": "Erreur lors du chargement du profil {{.name}} : {{.error}}", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Erreur lors de l'analyse de la version du pilote de la VM : {{.error}}", "Error parsing minikube version: {{.error}}": "Erreur lors de l'analyse de la version de minikube : {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. 
Les rétrogradations non-destructives ne sont pas prises en charge. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existant avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".", - "Error: [{{.id}}] {{.error}}": "", "Examples": "", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "Fermeture…", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -178,21 +154,21 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Échec de la modification des autorisations pour {{.minikube_dir_path}} : {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "Échec de la suppression du cluster : {{.error}}", "Failed to delete cluster: {{.error}}__1": "Échec de la suppression du cluster : {{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", @@ -203,29 +179,31 @@ "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "Échec de la définition de NO_PROXY Env. Veuillez utiliser `export NO_PROXY=$NO_PROXY,{{.ip}}.", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", - "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "Pour des résultats optimaux, installez kubectl à l'adresse suivante : https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/__1": "Pour des résultats optimaux, installez kubectl à l'adresse suivante : https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For more information, see:": "Pour en savoir plus, consultez les pages suivantes :", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "Oblige minikube à réaliser des opérations possiblement dangereuses.", "Found network options:": "Options de réseau trouvées :", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -236,6 +214,8 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. 
Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -252,8 +232,9 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Registres Docker non sécurisés à transmettre au daemon Docker. La plage CIDR par défaut du service sera ajoutée automatiquement.", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -268,7 +249,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "Dossiers locaux à partager avec l'invité par des installations NFS (pilote hyperkit uniquement).", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "Emplacement du socket VPNKit exploité pour la mise en réseau. Si la valeur est vide, désactive Hyperkit VPNKitSock. Si la valeur affiche \"auto\", utilise la connexion VPNKit de Docker pour Mac. Sinon, utilise le VSock spécifié (pilote hyperkit uniquement).", "Location of the minikube iso": "Emplacement de l'ISO minikube.", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -276,6 +257,7 @@ "Minikube is a tool for managing local Kubernetes clusters.": "", "Modify minikube config": "", "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "", @@ -285,18 +267,23 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "Le noeud \"{{.node_name}}\" est arrêté.", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "Aucun dépôt connu dans votre emplacement n'est accessible. {{.image_repository_name}} est utilisé comme dépôt de remplacement.", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "Aucun dépôt connu n'est accessible. Pensez à spécifier un autre dépôt d'images à l'aide de l'indicateur \"--image-repository\".", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Nombre de processeurs alloués à la VM minikube.", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -315,49 +302,62 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Veuillez mettre à niveau l'exécutable \"{{.driver_executable}}\". 
{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "Mise hors tension du profil \"{{.profile_name}}\" via SSH…", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "Préparation de Kubernetes {{.k8sVersion}} sur {{.runtime}} {{.runtimeVersion}}...", "Print current and latest version number": "", + "Print just the version number.": "", "Print the version of minikube": "", "Print the version of minikube.": "", "Problems detected in {{.entry}}:": "", "Problems detected in {{.name}}:": "", "Profile gets or sets the current minikube profile": "", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "Fournit l'identifiant unique universel (UUID) de la VM pour restaurer l'adresse MAC (pilote hyperkit uniquement).", + "Pulling base image ...": "", "Pulling images ...": "Extraction des images... ", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Miroirs de dépôt à transmettre au daemon Docker.", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "", "Relaunching Kubernetes using {{.bootstrapper}} ...": "Redémarrage de Kubernetes à l'aide de {{.bootstrapper}}…", - "Removed all traces of the \"{{.name}}\" cluster.": "", + "Remove the incompatible --docker-opt flag if one was provided": "", + "Removed all traces of the \"{{.name}}\" cluster.": "Le cluster \"{{.name}}\" a été supprimé.", "Removing {{.directory}} ...": "Suppression du répertoire {{.directory}}…", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "La taille de disque demandée ({{.requested_size}}) est inférieure à la taille minimale ({{.minimum_size}}).", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "L'allocation de mémoire demandée ({{.memory}} Mo) est inférieure à l'allocation de mémoire par défaut ({{.default_memorysize}} Mo). Sachez que minikube pourrait ne pas fonctionner correctement ou planter de manière inattendue.", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. 
Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "L'allocation de mémoire demandée ({{.requested_size}}) est inférieure au minimum autorisé ({{.minimum_size}}).", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -372,6 +372,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "Désolé, le paramètre kubeadm.{{.parameter_name}} ne peut actuellement pas être utilisé avec \"--extra-config\".", @@ -383,8 +384,10 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "Démarrage du noeud de plan de contrôle {{.name}} dans le cluster {{.cluster}}", + "Starting node {{.name}} in cluster {{.cluster}}": "Démarrage du noeud {{.name}} dans le cluster {{.cluster}}", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Démarre un cluster Kubernetes local.", "Starts a node.": "", @@ -397,15 +400,14 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "", - "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "Le pilote \"{{.driver_name}}\" nécessite de disposer de droits racine. Veuillez exécuter minikube à l'aide de \"sudo minikube --vm-driver={{.driver_name}}\".", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", - "The \"{{.name}}\" cluster has been deleted.": "Le cluster \"{{.name}}\" a été supprimé.", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The 'none' driver provides limited isolation and may reduce system security and reliability.": "L'isolation fournie par le pilote \"none\" (aucun) est limitée, ce qui peut diminuer la sécurité et la fiabilité du système.", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", @@ -421,19 +423,23 @@ "The VM driver exited with an error, and may be corrupt. 
Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "Port d'écoute du serveur d'API.", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Nom du serveur d'API utilisé dans le certificat généré pour Kubernetes. Vous pouvez l'utiliser si vous souhaitez que le serveur d'API soit disponible en dehors de la machine.", "The argument to pass the minikube mount command on start": "Argument à transmettre à la commande d'installation de minikube au démarrage.", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "Nom du domaine DNS du cluster utilisé dans le cluster Kubernetes.", "The container runtime to be used (docker, crio, containerd)": "Environnement d'exécution du conteneur à utiliser (docker, crio, containerd).", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "Chemin d'accès au socket CRI à utiliser.", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Le pilote \"{{.driver}}\" n'est pas compatible avec {{.os}}.", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Nom du commutateur virtuel hyperv. La valeur par défaut affiche le premier commutateur trouvé (pilote hyperv uniquement).", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -445,10 +451,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -457,46 +466,63 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "Le pilote {{.driver_name}} ne doit pas être utilisé avec des droits racine.", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. {{.documentation_url}}": "Une nouvelle version de \"{{.driver_executable}}\" est disponible. Pensez à effectuer la mise à niveau. {{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "Cette opération peut également être réalisée en définissant la variable d'environment \"CHANGE_MINIKUBE_NONE_USER=true\".", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. 
Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Cela permet de conserver le contexte kubectl existent et de créer un contexte minikube.", "This will start the mount daemon and automatically mount files into minikube": "Cela permet de lancer le daemon d'installation et d'installer automatiquement les fichiers dans minikube.", "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "Conseil : Pour supprimer ce cluster appartenant à la racine, exécutez la commande \"sudo {{.cmd}} delete\".", "To connect to this cluster, use: kubectl --context={{.name}}": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".", "To connect to this cluster, use: kubectl --context={{.name}}__1": "Pour vous connecter à ce cluster, utilisez la commande \"kubectl --context={{.name}}\".", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "Pour utiliser les commandes kubectl ou minikube sous votre propre nom d'utilisateur, vous devrez peut-être les déplacer. Par exemple, pour écraser vos propres paramètres, exécutez la commande suivante :", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "", "Unable to bind flags": "", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "Impossible d'obtenir l'amorceur : {{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "Impossible de charger les images mises en cache depuis le fichier de configuration.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "Impossible de charger la configuration : {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "Impossible d'analyser la version \"{{.kubernetes_version}}\" : {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to pull images, which may be OK: {{.error}}": "Impossible d'extraire des images, qui sont peut-être au bon format : {{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. 
Will retry...": "", @@ -507,6 +533,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "Mise à jour du {{.machine_type}} {{.driver_name}} en marche \"{{.cluster}}\" ...", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Mise à niveau de Kubernetes de la version {{.old}} à la version {{.new}}…", "Usage": "Usage", @@ -527,11 +554,10 @@ "Userspace file server:": "", "Using image repository {{.name}}": "Utilisation du dépôt d'images {{.name}}…", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", - "Using the {{.driver}} driver based on existing profile": "", - "Using the {{.driver}} driver based on user configuration": "", + "Using the {{.driver}} driver based on existing profile": "Utilisation du pilote {{.driver}} basé sur le profil existant", + "Using the {{.driver}} driver based on user configuration": "Utilisation du pilote {{.driver}} basé sur la configuration de l'utilisateur", "VM driver is one of: %v": "Le pilote de la VM appartient à : %v", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -543,64 +569,65 @@ "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Wait failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "Avant de quitter, veuillez patienter jusqu'à ce que les principaux services Kubernetes soient opérationnels.", "Waiting for SSH access ...": "En attente de l'accès SSH...", - "Waiting for cluster to come online ...": "", "Waiting for:": "En attente de :", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "Emplacement permettant d'accéder aux partages NFS en mode root, la valeur par défaut affichant /nfsshares (pilote hyperkit uniquement).", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details": "Il semble que vous utilisiez un proxy, mais votre environnement NO_PROXY n'inclut pas l'adresse IP ({{.ip_address}}) de minikube. Consultez la documentation à l'adresse {{.documentation_url}} pour en savoir plus.", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "Vous devrez peut-être supprimer la VM \"{{.name}}\" manuellement de votre hyperviseur.", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. 
Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "Les configurations kubectl et minikube seront stockées dans le dossier {{.home_folder}}.", - "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "", @@ -609,14 +636,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. 
You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -640,17 +669,18 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} sur {{.platform}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/ja.json b/translations/ja.json index 9df68fb233..25add034bc 100644 --- a/translations/ja.json +++ b/translations/ja.json @@ -1,65 +1,65 @@ { - "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"The '{{.minikube_addon}}' addon is disabled": "「{{.minikube_addon}}」アドオンは無効化されています", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "「{{.machineName}}」は存在しません。停止すべき対象がありません", "\"{{.minikube_addon}}\" was successfully disabled": "「{{.minikube_addon}}」が無効化されました", - "\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "「{{.name}}」というクラスターは存在しません。クリーンアップ処理を続行します。", + "\"{{.name}}\" cluster does not exist. 
Proceeding ahead with cleanup.": "「{{.name}}」というクラスターは存在しません。クリーンアップ処理を続行します", "\"{{.name}}\" profile does not exist": "「{{.name}}」というプロファイルは存在しません", - "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", + "\"{{.name}}\" profile does not exist, trying anyways.": "「{{.name}}」というプロファイルは存在しません", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "「{{.profile_name}}」というVMは存在しません。停止すべき対象がありません", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "「{{.profile_name}}」というホストは存在しません。IPを表示できません", - "\"{{.profile_name}}\" stopped.": "「{{.profile_name}}」が停止しました。", + "\"{{.profile_name}}\" stopped.": "「{{.profile_name}}」が停止しました", "'none' driver does not support 'minikube docker-env' command": "「none」ドライバーは「minikube docker-env」コマンドをサポートしていません", "'none' driver does not support 'minikube mount' command": "「none」ドライバーは「minikube mount」コマンドをサポートしていません", - "'none' driver does not support 'minikube podman-env' command": "", + "'none' driver does not support 'minikube podman-env' command": "「none」ドライバーは「minikube podman-env」コマンドをサポートしていません", "'none' driver does not support 'minikube ssh' command": "「none」ドライバーは「minikube ssh」コマンドをサポートしていません", "'{{.driver}}' driver reported an issue: {{.error}}": "「{{.driver}}」ドライバーがエラーを報告しました: {{.error}}", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", - "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", - "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", - "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", - "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", - "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される一連の API サーバー IP アドレス。マシンの外部から API サーバーを利用できるようにする場合に使用します。", - "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", - "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される一連の API サーバー名。マシンの外部から API サーバーを利用できるようにする場合に使用します。", + "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "VPN、あるいはファイアウォールによって、minkube VM への HTTP アクセスが干渉されています。他の手段として、別の VM を試してみてください: https://minikube.sigs.k8s.io/docs/start/", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the internet. 
You may need to configure it to use a proxy.": "ファイアウォールによって、Docker minikube VM はインターネットと繋がることができません。 プロキシを使用するように設定する必要があるかもしれません", + "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "ファイアウォールによって、minikube は外側への HTTPS リクエストをすることができません。HTTPS_PROXY 環境変数の値を変える必要があるかもしれません", + "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "ファイアウォールによって、minikube がインターネットに繋がることができてない可能性があります。minikube がプロキシーを使うように設定する必要があるかもしれません", + "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用されている一連の APIサーバーの IP アドレスのセット。 マシンの外部から API サーバーを利用できるようにする場合に使用します", + "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される一連の API サーバー IP アドレス。マシンの外部から API サーバーを利用できるようにする場合に使用します", + "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される一連の API サーバー名。マシンの外部から API サーバーを利用できるようにする場合に使用します", + "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される一連の API サーバー名。マシンの外部から API サーバーを利用できるようにする場合に使用します", "A set of key=value pairs that describe configuration that may be passed to different components.\nThe key should be '.' separated, and the first part before the dot is the component to apply the configuration to.\nValid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler\nValid kubeadm parameters:": "さまざまなコンポーネントに渡される可能性のある構成を記述する一連の key=value ペア。\nキーは「.」で区切る必要があり、このドットより前の部分は構成の適用先のコンポーネントを表します。\n有効なコンポーネントは、kubelet、kubeadm、apiserver、controller-manager、etcd、proxy、scheduler です。\n有効な kubeadm パラメータ:", - "A set of key=value pairs that describe feature gates for alpha/experimental features.": "アルファ版または試験運用版の機能のフィーチャーゲートを記述する一連の key=value ペアです。", - "Access the kubernetes dashboard running within the minikube cluster": "", + "A set of key=value pairs that describe feature gates for alpha/experimental features.": "アルファ版または試験運用版の機能のフィーチャーゲートを記述する一連の key=value ペアです", + "Access the kubernetes dashboard running within the minikube cluster": "minikube クラスタ内で動いている Kubernetes のダッシュボードにアクセスします", "Add an image to local cache.": "イメージをローカルキャッシュに追加します", - "Add machine IP to NO_PROXY environment variable": "", - "Add or delete an image from the local cache.": "", - "Adding node {{.name}} to cluster {{.cluster}}": "", - "Additional help topics": "", - "Additional mount options, such as cache=fscache": "", - "Adds a node to the given cluster config, and starts it.": "", - "Adds a node to the given cluster.": "", - "Advanced Commands:": "", - "After minikube restart the dockerd ports might have changed. 
To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", - "Aliases": "", + "Add machine IP to NO_PROXY environment variable": "マシーンの IP アドレスをNO_PROXY 環境変数に追加します", + "Add or delete an image from the local cache.": "ローカルのキャッシュからイメージを追加あるいは削除します", + "Adding node {{.name}} to cluster {{.cluster}}": "「{{.name}}」というノードを「{{.cluster}}」というクラスタに追加します", + "Additional help topics": "追加のトピック", + "Additional mount options, such as cache=fscache": "cache=fscache などの追加のマウントオプション", + "Adds a node to the given cluster config, and starts it.": "ノードをクラスタの設定に追加して、起動します", + "Adds a node to the given cluster.": "ノードをクラスターに追加します", + "Advanced Commands:": "高度なコマンド", + "Aliases": "エイリアス", "Allow user prompts for more information": "", - "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Docker イメージの pull 元の代替イメージ リポジトリ。これは、gcr.io へのアクセスが制限されている場合に使用できます。これを \\\"auto\\\" に設定すると、minikube によって自動的に指定されるようになります。中国本土のユーザーの場合、registry.cn-hangzhou.aliyuncs.com/google_containers などのローカル gcr.io ミラーを使用できます。", + "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "Docker イメージの pull 元の代替イメージ リポジトリ。これは、gcr.io へのアクセスが制限されている場合に使用できます。これを \\\"auto\\\" に設定すると、minikube によって自動的に指定されるようになります。中国本土のユーザーの場合、registry.cn-hangzhou.aliyuncs.com/google_containers などのローカル gcr.io ミラーを使用できます", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "minikube VM に割り当てられた RAM 容量(形式: \u003cnumber\u003e[\u003cunit\u003e]、unit = b、k、m、g)", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the {{.driver}} driver": "", "Automatically selected the {{.driver}} driver. 
Other choices: {{.alternates}}": "", - "Available Commands": "", - "Basic Commands:": "", + "Available Commands": "利用可能なコマンド", + "Basic Commands:": "基本的なコマンド", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", - "Bind Address: {{.Address}}": "", - "Block until the apiserver is servicing API requests": "", + "Bind Address: {{.Address}}": "アドレスをバインドします: {{.Address}}", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "", - "Check that the provided apiserver flags are valid": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -69,133 +69,117 @@ "Confirm that you have supplied the correct value to --hyperv-virtual-switch using the 'Get-VMSwitch' command": "", "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", - "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "使用するイメージミラーの国コード。グローバルのものを使用する場合は空のままにします。中国本土のユーザーの場合は、「cn」に設定します。", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "使用するイメージミラーの国コード。グローバルのものを使用する場合は空のままにします。中国本土のユーザーの場合は、「cn」に設定します", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating mount {{.name}} ...": "マウント {{.name}} を作成しています...", - "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", - "DEPRECATED, use `driver` instead.": "", - "Default group id used for the mount": "", - "Default user id used for the mount": "", - "Delete an image from the local cache.": "", - "Deletes a local kubernetes cluster": "", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "DEPRECATED, use `driver` instead.": "非推奨。代わりに driver を使ってください", + "Default group id used for the mount": "マウント時のデフォルトのグループ ID", + "Default user id used for the mount": "マウント時のデフォルトのユーザー ID", + "Delete an image from the local cache.": "ローカルのキャッシュからイメージを削除します", + "Deletes a local kubernetes cluster": "ローカルの Kubernetes クラスタを削除します", "Deletes a local kubernetes cluster. This command deletes the VM, and removes all\nassociated files.": "", - "Deletes a local kubernetes cluster. This command deletes the VM, and removes all associated files.": "ローカルの Kubernetes クラスタを削除します。このコマンドによって、VM とそれに関連付けられているすべてのファイルが削除されます。", - "Deletes a node from a cluster.": "", + "Deletes a local kubernetes cluster. This command deletes the VM, and removes all associated files.": "ローカルの Kubernetes クラスタを削除します。このコマンドによって、VM とそれに関連付けられているすべてのファイルが削除されます", + "Deletes a node from a cluster.": "ノードをクラスタから削除します", "Deleting \"{{.profile_name}}\" in {{.driver_name}} ...": "{{.driver_name}} の「{{.profile_name}}」を削除しています...", - "Deleting node {{.name}} from cluster {{.cluster}}": "", + "Deleting node {{.name}} from cluster {{.cluster}}": "{{.cluster}} クラスタから {{.name}} ノードを削除しています", "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)": "VM が起動する前にハードウェアの仮想化の可用性チェックを無効にします(virtualbox ドライバのみ)", "Disable dynamic memory in your VM manager, or pass in a larger --memory value": "", "Disables the addon w/ADDON_NAME within minikube (example: minikube addons disable dashboard). For a list of available addons use: minikube addons list": "", "Disables the filesystem mounts provided by the hypervisors": "ハイパーバイザによって指定されているファイル システム マウントを無効にします", "Disk size allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "minikube VM に割り当てられたディスクサイズ(形式: \u003cnumber\u003e[\u003cunit\u003e]、unit = b、k、m、g)", "Disk size allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", - "Display dashboard URL instead of opening a browser": "", + "Display dashboard URL instead of opening a browser": "ブラウザで開く代わりにダッシュボードの URL を表示します", "Display the kubernetes addons URL in the CLI instead of opening it in the default browser": "", "Display the kubernetes service URL in the CLI instead of opening it in the default browser": "", - "Display values currently set in the minikube config file": "", + "Display values currently set in the minikube config file": "現在の minikube の設定ファイルにセットされている値を表示します", "Display values currently set in the minikube config file.": "", "Docker inside the VM is unavailable. 
Try running 'minikube delete' to reset the VM.": "", - "Docs have been saved at - {{.path}}": "", - "Documentation: {{.url}}": "", + "Docs have been saved at - {{.path}}": "ドキュメントは以下のパスに保存されました。{{.path}}", + "Documentation: {{.url}}": "ドキュメント: {{.url}}", "Done! kubectl is now configured to use \"{{.name}}": "完了しました。kubectl が「{{.name}}」を使用するよう構成されました", "Done! kubectl is now configured to use \"{{.name}}\"": "", "Done! kubectl is now configured to use \"{{.name}}__1": "完了しました。kubectl が「{{.name}}」を使用するよう構成されました", - "Download complete!": "ダウンロードが完了しました。", - "Downloading VM boot image ...": "", - "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", - "Downloading {{.name}} {{.version}}": "", - "ERROR creating `registry-creds-acr` secret": "", - "ERROR creating `registry-creds-dpr` secret": "", - "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", - "ERROR creating `registry-creds-gcr` secret: {{.error}}": "", + "Download complete!": "ダウンロードが完了しました", + "Downloading Kubernetes {{.version}} preload ...": "Kubernetes {{.version}} のダウンロードの準備をしています", + "Downloading VM boot image ...": "VM ブートイメージをダウンロードしています...", + "Downloading driver {{.driver}}:": "{{.driver}} ドライバをダウンロードしています:", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", + "ERROR creating `registry-creds-acr` secret": "`registry-creds-acr` シークレット作成中にエラーが発生しました", + "ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` シークレット作成中にエラーが発生しました", + "ERROR creating `registry-creds-ecr` secret: {{.error}}": "`registry-creds-ecr` シークレット作成中にエラーが発生しました。{{.error}}", + "ERROR creating `registry-creds-gcr` secret: {{.error}}": "`registry-creds-gcr` シークレット作成中にエラーが発生しました。{{.error}}", "Either systemctl is not installed, or Docker is broken. Run 'sudo systemctl start docker' and 'journalctl -u docker'": "", "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "minikube での試験運用版 NVIDIA GPU の対応を有効にします", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のホストリゾルバを有効にします(virtualbox ドライバのみ)", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "NAT DNS リクエスト用のプロキシを有効にします(virtualbox ドライバのみ)", - "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "デフォルトの CNI プラグイン(/etc/cni/net.d/k8s.conf)を有効にします。\\\"--network-plugin=cni\\\" と組み合わせて使用されます。", + "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\": "デフォルトの CNI プラグイン(/etc/cni/net.d/k8s.conf)を有効にします。\\\"--network-plugin=cni\\\" と組み合わせて使用されます", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). 
For a list of available addons use: minikube addons list": "", - "Enabling '{{.name}}' returned an error: {{.error}}": "", - "Enabling addons: {{.addons}}": "", - "Enabling dashboard ...": "", + "Enabling '{{.name}}' returned an error: {{.error}}": "'{{.name}}' を有効にする際にエラーが発生しました。{{.error}}", + "Enabling addons: {{.addons}}": "アドオンを有効化しています: {{.addons}}", + "Enabling dashboard ...": "ダッシュボードを有効化しています...", "Ensure that CRI-O is installed and healthy: Run 'sudo systemctl start crio' and 'journalctl -u crio'. Alternatively, use --container-runtime=docker": "", "Ensure that Docker is installed and healthy: Run 'sudo systemctl start docker' and 'journalctl -u docker'. Alternatively, select another value for --driver": "", "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", "Environment variables to pass to the Docker daemon. (format: key=value)": "Docker デーモンに渡す環境変数(形式: Key=Value)", - "Error adding node to cluster": "", + "Error adding node to cluster": "クラスタにノードを追加中にエラーが発生しました", "Error checking driver version: {{.error}}": "ドライバのバージョンの確認中にエラーが発生しました。{{.error}}", - "Error creating minikube directory": "", - "Error creating view template": "", - "Error detecting shell": "", - "Error executing view template": "", - "Error finding port for mount": "", - "Error generating set output": "", - "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", - "Error getting cluster bootstrapper": "", - "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", - "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", - "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", - "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", + "Error creating minikube directory": "minikube のディレクトリ作成中にエラーが発生しました", + "Error creating view template": "表示用のテンプレートを作成中にエラーが発生しました", + "Error detecting shell": "シェルの確認中にエラーが発生しました", + "Error executing view template": "表示用のテンプレートを実行中にエラーが発生しました", + "Error finding port for mount": "マウント用のポートを確認中にエラーが発生しました", + "Error generating set output": "set の出力を生成中にエラーが発生しました", + "Error generating unset output": "unset の出力を生成中にエラーが発生しました", + "Error getting cluster bootstrapper": "クラスタのブートストラッパを取得中にエラーが発生しました", + "Error getting cluster config": "クラスタの設定を取得中にエラーが発生しました", + "Error getting host": "ホストを取得中にエラーが発生しました", + "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "「{{.driver_name}}」ドライバー用のポートをバインディング中にエラーが発生しました", + "Error getting primary control plane": "コントロールプレーンを取得中にエラーが発生しました", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", - "Error getting ssh client": "", + "Error getting ssh client": "SSH クライアントを取得中にエラーが発生しました", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", - "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", - "Error loading profile config: {{.error}}": "", + "Error killing mount process": "マウントプロセスを Kill 中にエラーが発生しました", + "Error loading profile config: {{.error}}": "プロフィールの設定を読み込み中にエラーが発生しました。{{.error}}", "Error loading profile {{.name}}: {{.error}}": 
"プロファイル {{.name}} の読み込み中にエラーが発生しました。{{.error}}", - "Error opening service": "", + "Error opening service": "サービスを公開中にエラーが発生しました", "Error parsing Driver version: {{.error}}": "Driver バージョンの解析中にエラーが発生しました。{{.error}}", "Error parsing minikube version: {{.error}}": "minikube バージョンの解析中にエラーが発生しました。{{.error}}", - "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", - "Error starting cluster": "", - "Error starting mount": "", - "Error starting node": "", + "Error reading {{.path}}: {{.error}}": "{{.path}} を読み込み中にエラーが発生しました。{{.error}}", + "Error starting cluster": "クラスタを起動中にエラーが発生しました", + "Error starting mount": "マウントを開始中にエラーが発生しました", "Error while setting kubectl current context : {{.error}}": "", - "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "エラー: Kubernetes v{{.new}} が選択されましたが、使用しているプロファイルの既存クラスタで実行されているのは Kubernetes v{{.old}} です。非破壊的なダウングレードはサポートされていませんが、以下のいずれかの方法で続行できます。\n* Kubernetes v{{.new}} を使用してクラスタを再作成する: 「minikube delete {{.profile}}」を実行してから、「minikube start {{.profile}} --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.new}} を使用して 2 つ目のクラスタを作成する: 「minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.old}} 以降を使用して既存のクラスタを再利用する: 「minikube start {{.profile}} --kubernetes-version={{.old}}」を実行します。", - "Error: [{{.id}}] {{.error}}": "", - "Examples": "", + "Error writing mount pid": "マウントした pid を書き込み中にエラーが発生しました", + "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "エラー: Kubernetes v{{.new}} が選択されましたが、使用しているプロファイルの既存クラスタで実行されているのは Kubernetes v{{.old}} です。非破壊的なダウングレードはサポートされていませんが、以下のいずれかの方法で続行できます。\n* Kubernetes v{{.new}} を使用してクラスタを再作成する: 「minikube delete {{.profile}}」を実行してから、「minikube start {{.profile}} --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.new}} を使用して 2 つ目のクラスタを作成する: 「minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}」を実行します。\n* Kubernetes v{{.old}} 以降を使用して既存のクラスタを再利用する: 「minikube start {{.profile}} --kubernetes-version={{.old}}」を実行します", + "Error: [{{.id}}] {{.error}}": "エラーが発生しました。[{{.id}}] {{.error}}", + "Examples": "例", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "終了しています", - "Exiting.": "終了しています。", + "Exiting.": "終了しています", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", "Failed runtime": "", "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} に対する権限を変更できませんでした。{{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "クラスタを削除できませんでした。{{.error}}", "Failed to delete cluster: {{.error}}__1": "クラスタを削除できませんでした。{{.error}}", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", @@ -203,103 +187,116 @@ "Failed to list cached images": "", "Failed to reload cached images": "", "Failed to save config": "", - "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY 環境変数を設定できませんでした。「export NO_PROXY=$NO_PROXY,{{.ip}}」を使用してください。", + "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY 環境変数を設定できませんでした。「export NO_PROXY=$NO_PROXY,{{.ip}}」を使用してください", "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "", - "Failed to setup kubeconfig": "", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "", "Failed to update config": "", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", - "Flags": "", - "Follow": "", + "Filter to use only VM Drivers": "", + "Flags": "フラグ", + "Follow": "たどる", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "最適な結果を得るには、kubectl を次のサイト https://kubernetes.io/docs/tasks/tools/install-kubectl/ からインストールしてください", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/__1": "最適な結果を得るには、kubectl を次のサイト https://kubernetes.io/docs/tasks/tools/install-kubectl/ からインストールしてください", - "For more information, see:": "詳細については、次をご覧ください。", + "For more information, see:": "詳細については、次をご覧ください", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "minikube で危険な可能性のある操作を強制的に実行します", - "Found network options:": "ネットワーク オプションが見つかりました。", + "Found network options:": "ネットワーク オプションが見つかりました", "Found {{.number}} invalid profile(s) !": "", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", "Gets the status of a local kubernetes cluster": "", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", - "Global Flags": "", + "Getting bootstrapper": "ブートストラッパを取得中です", + "Getting primary control plane": "コントロールプレーンを取得中です", + "Global Flags": "グローバルなフラグ", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status": "", - "Group ID: {{.groupID}}": "", + "Group ID: {{.groupID}}": "グループ ID: {{.groupID}}", "Have you set up libvirt correctly?": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "minikube でゲストに対し、ハイパーバイザ署名を非表示にします(kvm2 ドライバのみ)", "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", "If the above advice does not help, please let us know:": "", "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.": "", - "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --vm-driver=none.": "true の場合、現在のブートストラッパの Docker イメージをキャッシュに保存して、マシンに読み込みます。--vm-driver=none の場合は常に false です。", - "If true, only download and cache files for later use - don't install or start anything.": "true の場合、後で使用できるようにファイルのダウンロードとキャッシュ保存だけが行われます。インストールも起動も行われません。", + "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --vm-driver=none.": "true の場合、現在のブートストラッパの Docker イメージをキャッシュに保存して、マシンに読み込みます。--vm-driver=none の場合は常に false です", + "If true, only download and cache files for later use - don't install or start anything.": "true の場合、後で使用できるようにファイルのダウンロードとキャッシュ保存だけが行われます。インストールも起動も行われません", "If true, the added node will be marked for work. Defaults to true.": "", "If true, the node added will also be a control plane in addition to a worker.": "", "If using the none driver, ensure that systemctl is installed": "", "If you are running minikube within a VM, consider using --driver=none:": "", - "Images Commands:": "", + "Images Commands:": "イメージ用のコマンド", "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "", - "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "Docker デーモンに渡す Docker レジストリが安全ではありません。デフォルトのサービス CIDR 範囲が自動的に追加されます。", + "Insecure Docker registries to pass to the Docker daemon. 
The default service CIDR range will automatically be added.": "Docker デーモンに渡す Docker レジストリが安全ではありません。デフォルトのサービス CIDR 範囲が自動的に追加されます", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", - "IsEnabled failed": "", + "IsEnabled failed": "IsEnabled が失敗しました", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", "Launching Kubernetes ...": "Kubernetes を起動しています...", - "Launching proxy ...": "", + "Launching proxy ...": "プロキシを起動しています...", "List all available images from the local cache.": "", "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)": "ホストでソケットとして公開する必要のあるゲスト VSock ポートのリスト(hyperkit ドライバのみ)", "Lists all available minikube addons as well as their current statuses (enabled/disabled)": "", - "Lists all minikube profiles.": "", + "Lists all minikube profiles.": "すべてのminikubeのプロフィールを一覧で表示します", "Lists all valid minikube profiles and detects all possible invalid profiles.": "", "Lists the URLs for the services in your local cluster": "", "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "NFS マウントを介してゲストと共有するローカル フォルダ(hyperkit ドライバのみ)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "ネットワーキングに使用する VPNKit ソケットのロケーション。空の場合、Hyperkit VPNKitSock が無効になり、「auto」の場合、Mac VPNKit 接続に Docker が使用され、それ以外の場合、指定された VSock が使用されます(hyperkit ドライバのみ)", "Location of the minikube iso": "minikube iso のロケーション", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", - "Message Size: {{.size}}": "", + "Message Size: {{.size}}": "メッセージのサイズ: {{.size}}", "Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.": "", "Minikube is a tool for managing local Kubernetes clusters.": "", - "Modify minikube config": "", - "Modify minikube's kubernetes addons": "", - "Mount type: {{.name}}": "", + "Modify minikube config": "minikube の設定を修正しています", + "Modify minikube's kubernetes addons": "minikube の Kubernetes アドオンを修正しています", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", + "Mount type: {{.name}}": "マウントタイプ: {{.name}}", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "", "Mounts the specified directory into minikube.": "", - "Multiple errors deleting profiles": "", - "Multiple minikube profiles were found -": "", + "Multiple errors deleting profiles": "プロフィールを削除中に複数のエラーが発生しました", + "Multiple minikube profiles were found -": "複数の minikube のプロフィールが見つかりました", "NIC Type used for host only network. 
One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", - "Node operations": "", - "Node {{.name}} was successfully deleted.": "", - "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "使用しているロケーション内で既知のいずれのリポジトリにもアクセスできません。フォールバックとして {{.image_repository_name}} を使用します。", - "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "既知のいずれのリポジトリにもアクセスできません。--image-repository フラグとともに代替のイメージ リポジトリを指定することを検討してください。", + "Node \"{{.node_name}}\" stopped.": "「{{.node_name}}」ノードが停止しました。", + "Node operations": "ノードの運用", + "Node {{.name}} failed to start, deleting and trying again.": "", + "Node {{.name}} was successfully deleted.": "{{.name}} ノードは削除されました。", + "Node {{.nodeName}} does not exist.": "{{.nodeName}} ノードは存在しません。", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", + "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "使用しているロケーション内で既知のいずれのリポジトリにもアクセスできません。フォールバックとして {{.image_repository_name}} を使用します", + "None of the known repositories is accessible. 
Consider specifying an alternative image repository with --image-repository flag": "既知のいずれのリポジトリにもアクセスできません。--image-repository フラグとともに代替のイメージ リポジトリを指定することを検討してください", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "minikube VM に割り当てられた CPU の数", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", - "OS release is {{.pretty_name}}": "", + "OS release is {{.pretty_name}}": "OS は {{.pretty_name}} です。", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -318,48 +315,61 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "「{{.driver_executable}}」をアップグレードしてください。{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "SSH 経由で「{{.profile_name}}」の電源をオフにしています...", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "{{.runtime}} {{.runtimeVersion}} で Kubernetes {{.k8sVersion}} を準備しています...", "Print current and latest version number": "", + "Print just the version number.": "", "Print the version of minikube": "", "Print the version of minikube.": "", "Problems detected in {{.entry}}:": "", "Problems detected in {{.name}}:": "", "Profile gets or sets the current minikube profile": "", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "MAC アドレスを復元するための VM UUID を指定します(hyperkit ドライバのみ)", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "Docker デーモンに渡すレジストリ ミラー", "Reinstall VirtualBox and reboot. 
Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "", "Relaunching Kubernetes using {{.bootstrapper}} ...": "{{.bootstrapper}} を使用して Kubernetes を再起動しています...", + "Remove the incompatible --docker-opt flag if one was provided": "", "Removed all traces of the \"{{.name}}\" cluster.": "", "Removing {{.directory}} ...": "{{.directory}} を削除しています...", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "リクエストされたディスクサイズ {{.requested_size}} が最小値 {{.minimum_size}} 未満です", - "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "リクエストされたメモリ割り当て({{.memory}} MB)がデフォルトのメモリ割り当て {{.default_memorysize}} MB 未満です。minikube が正常に動作しないか、予期せずクラッシュする可能性があることに注意してください。", + "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "リクエストされたメモリ割り当て({{.memory}} MB)がデフォルトのメモリ割り当て {{.default_memorysize}} MB 未満です。minikube が正常に動作しないか、予期せずクラッシュする可能性があることに注意してください", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "リクエストされたメモリ割り当て {{.requested_size}} が許可される最小値 {{.minimum_size}} 未満です", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. 
Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -374,6 +384,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "申し訳ありません。現在、kubeadm.{{.parameter_name}} パラメータは --extra-config でサポートされていません", @@ -381,12 +392,14 @@ "Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}": "", "Specify --kubernetes-version in v\u003cmajor\u003e.\u003cminor.\u003cbuild\u003e form. example: 'v1.1.14'": "", "Specify an alternate --host-only-cidr value, such as 172.16.0.1/24": "", - "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)": "Docker デーモンに渡す任意のフラグを指定します(形式: key=value)。", + "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)": "Docker デーモンに渡す任意のフラグを指定します(形式: key=value)", "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "ローカルの Kubernetes クラスタを起動します", "Starts a node.": "", @@ -399,23 +412,23 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "", - "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. 
Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "「{{.driver_name}}」ドライバにはルート権限が必要です。「sudo minikube --vm-driver={{.driver_name}}」を使用して minikube を実行してください", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", - "The \"{{.name}}\" cluster has been deleted.": "「{{.name}}」クラスタが削除されました。", - "The \"{{.name}}\" cluster has been deleted.__1": "「{{.name}}」クラスタが削除されました。", - "The 'none' driver provides limited isolation and may reduce system security and reliability.": "ドライバに「none」を指定すると、分離が制限され、システムのセキュリティと信頼性が低下する可能性があります。", + "The \"{{.name}}\" cluster has been deleted.": "「{{.name}}」クラスタが削除されました", + "The \"{{.name}}\" cluster has been deleted.__1": "「{{.name}}」クラスタが削除されました", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", + "The 'none' driver provides limited isolation and may reduce system security and reliability.": "ドライバに「none」を指定すると、分離が制限され、システムのセキュリティと信頼性が低下する可能性があります", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "The '{{.name}}' driver does not respect the --cpus flag": "", "The '{{.name}}' driver does not respect the --memory flag": "", - "The CIDR to be used for service cluster IPs.": "サービス クラスタ IP に使用される CIDR。", + "The CIDR to be used for service cluster IPs.": "サービス クラスタ IP に使用される CIDR", "The CIDR to be used for the minikube VM (virtualbox driver only)": "minikube VM に使用される CIDR(virtualbox ドライバのみ)", "The KVM QEMU connection URI. (kvm2 driver only)": "KVM QEMU 接続 URI(kvm2 ドライバのみ)", "The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.": "", @@ -424,19 +437,23 @@ "The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "API サーバー リスニング ポート", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される API サーバー名。マシンの外部から API サーバーを利用できるようにする場合に使用します。", + "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "Kubernetes 用に生成された証明書で使用される API サーバー名。マシンの外部から API サーバーを利用できるようにする場合に使用します", "The argument to pass the minikube mount command on start": "起動時に minikube マウント コマンドを渡す引数", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. 
This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "Kubernetes クラスタで使用されるクラスタ DNS ドメイン名", "The container runtime to be used (docker, crio, containerd)": "使用されるコンテナ ランタイム(docker、crio、containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "使用される CRI ソケットパス", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "ドライバ「{{.driver}}」は、{{.os}} ではサポートされていません", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 仮想スイッチ名。最初に見つかったものにデフォルト設定されます(hyperv ドライバのみ)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -448,10 +465,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -459,47 +479,64 @@ "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", - "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} ドライバをルート権限で使用しないでください。", + "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} ドライバをルート権限で使用しないでください", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. 
{{.documentation_url}}": "「{{.driver_executable}}」の新しいバージョンがあります。アップグレードを検討してください。{{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "これは環境変数 CHANGE_MINIKUBE_NONE_USER=true を設定して自動的に行うこともできます", - "This will keep the existing kubectl context and will create a minikube context.": "これにより既存の kubectl コンテキストが保持され、minikube コンテキストが作成されます。", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", + "This will keep the existing kubectl context and will create a minikube context.": "これにより既存の kubectl コンテキストが保持され、minikube コンテキストが作成されます", "This will start the mount daemon and automatically mount files into minikube": "これによりマウント デーモンが起動し、ファイルが minikube に自動的にマウントされます", "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "ヒント: この root 所有のクラスタを削除するには、「sudo {{.cmd}} delete」を実行します", "To connect to this cluster, use: kubectl --context={{.name}}": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します", "To connect to this cluster, use: kubectl --context={{.name}}__1": "このクラスタに接続するには、「kubectl --context={{.name}}」を使用します", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", - "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "kubectl か minikube コマンドを独自のユーザーとして使用するには、そのコマンドの再配置が必要な場合があります。たとえば、独自の設定を上書きするには、以下を実行します。", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", + "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "kubectl か minikube コマンドを独自のユーザーとして使用するには、そのコマンドの再配置が必要な場合があります。たとえば、独自の設定を上書きするには、以下を実行します", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "", "Unable to bind flags": "", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "ブートストラッパを取得できません。{{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", - "Unable to load cached images from config file.": "キャッシュに保存されているイメージを構成ファイルから読み込むことができません。", + "Unable to load cached images from config file.": "キャッシュに保存されているイメージを構成ファイルから読み込むことができません", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "構成を読み込むことができません。{{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "「{{.kubernetes_version}}」を解析できません。{{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to pull images, which may be OK: {{.error}}": "イメージを pull できませんが、問題ありません。{{.error}}", - "Unable to remove machine directory: %v": "", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. Will retry...": "", @@ -510,6 +547,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "Kubernetes を {{.old}} から {{.new}} にアップグレードしています", "Usage": "", @@ -525,133 +563,138 @@ "Use -A to specify all namespaces": "", "Use VirtualBox to remove the conflicting VM and/or network interfaces": "", "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. 
Useful for the machine drivers when they will not start with 'Waiting for SSH'.": "", - "User ID: {{.userID}}": "", - "Userspace file server is shutdown": "", - "Userspace file server:": "", + "User ID: {{.userID}}": "ユーザー ID: {{.userID}}", + "Userspace file server is shutdown": "ユーザー側のファイルサーバーが停止しました", + "Userspace file server:": "ユーザー側のファイルサーバー", "Using image repository {{.name}}": "イメージ リポジトリ {{.name}} を使用しています", - "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", - "Using the {{.driver}} driver based on existing profile": "", - "Using the {{.driver}} driver based on user configuration": "", + "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "「 none 」ドライバで「 {{.runtime}} 」ランタイムを使用することは、テストされていない設定です!", + "Using the {{.driver}} driver based on existing profile": "プロフィールを元に、 {{.driver}} ドライバを使用します", + "Using the {{.driver}} driver based on user configuration": "設定を元に、 {{.driver}} ドライバを使用します", "VM driver is one of: %v": "VM ドライバは次のいずれかです。%v", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", - "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", - "Verify the IP address of the running cluster in kubeconfig.": "", - "Verifying dashboard health ...": "", - "Verifying proxy health ...": "", - "Version: {{.version}}": "", - "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'": "", - "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. Try running 'minikube delete'": "", - "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", - "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", - "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", - "Wait failed: {{.error}}": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "検証中に、ディスクのサイズ( {{.diskSize}} )をパースできませんでした。{{.error}}", + "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "HTTP_PROXY と HTTPS_PROXY 環境変数が正常に設定されているかを確認します", + "Verify the IP address of the running cluster in kubeconfig.": "Kubernetes の設定ファイルのクラスタの IP アドレスを確認します", + "Verifying dashboard health ...": "ダッシュボードの状態を確認しています...", + "Verifying proxy health ...": "プロキシの状態を確認しています...", + "Version: {{.version}}": "バージョン: {{.version}}", + "VirtualBox and Hyper-V are having a conflict. Use '--driver=hyperv' or disable Hyper-V using: 'bcdedit /set hypervisorlaunchtype off'": "VirtualBox と Hyper-V が衝突しています。「 --driver=hyperv 」を使用するか、以下のコマンドで Hyper-V を無効にしてください。 bcdedit /set hypervisorlaunchtype off", + "VirtualBox cannot create a network, probably because it conflicts with an existing network that minikube no longer knows about. Try running 'minikube delete'": "VirtualBox がネットワークを作成できません。おそらく minikube が把握していないネットワークと衝突しています。「 minikube delete 」を実行してみてください", + "VirtualBox is broken. 
Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "VirtualBox が故障しています。アンチウィルスソフトを無効にして、リブートしてください。もし問題が続くようであれば、 VirtualBox を再インストールしてください", + "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "VirtualBox が故障しています。 VirtualBox を再インストールして、リブートした後に、「 minikube delete 」を実行してください", + "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "VirtualBox はネットワークインターフェイスを見つけることができません。最新のリリースへとアップデートして、リブートしてみてください", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "このコンピュータでは仮想化のサポートが無効化されています。もし VM で minikube を動かすのであれば、「 --driver=docker 」を試してみてください。そうでなければ、仮想化を有効にする方法を BIOS の説明書を調べてください", + "Wait failed: {{.error}}": "待機するのに失敗しました。{{.error}}", "Wait until Kubernetes core services are healthy before exiting": "Kubernetes コアサービスが正常になるまで待機してから終了してください", - "Waiting for cluster to come online ...": "", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共有のルートに指定する場所。デフォルトは /nfsshares(hyperkit ドライバのみ)", - "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", + "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "仮想スイッチが明示的に設定されていない場合、デフォルトのではなく外部のスイッチを使用します。(Hyper-V ドライバのみ)", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "プロキシを使用しようとしていますが、現在の NO_PROXY 環境に minikube IP({{.ip_address}})は含まれていません。詳細については、{{.documentation_url}} をご覧ください", - "You can delete them using the following command(s):": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "「 minikube kubectl -- get pods 」で、一致するバージョンを表示することができます", + "You can delete them using the following command(s):": "以下のコマンドで削除することができます", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "ハイパーバイザから「{{.name}}」VM を手動で削除することが必要な可能性があります", - "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", - "You must specify a service name": "", - "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", - "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", - "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", - "Your minikube vm is not running, try minikube start.": "", - "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", - "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", - "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", - "bash completion failed": "", - "call with cleanup=true to remove old tunnels": "", - "command runner": "", - "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", - "config view failed": "", - "creating api client": "", - "dashboard service is not running: {{.error}}": "", - "disable failed": "", - "dry-run mode. Validates configuration, but does not mutate system state": "", - "dry-run validation complete!": "", - "enable failed": "", - "error creating clientset": "", - "error creating machine client": "", - "error getting primary control plane": "", - "error getting ssh port": "", - "error parsing the input ip address for mount": "", - "error starting tunnel": "", - "error stopping tunnel": "", - "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", - "if true, will embed the certs in kubeconfig.": "", - "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", - "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", + "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "Hyper-V マネージャを停止して、「 minikube delete 」を再実行する必要があるかもしれません ", + "You must specify a service name": "サービスの名前を明示する必要があります", + "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "ホストマシーンは KVM 仮想化をサポートしていません。 qemu-kvm がインストールされていることを確認してください。「 virt-host-validate 」を実行して、デバッグしてください", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "ホストマシーンは仮想化をサポートしていません。もし VM 内で minikube を動かすのであれば、「 --driver=docker 」を試してください。そうでなければ、 BIOS で仮想化を有効にしてください", + "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "ホストマシーンが minikube の VM にパケットをルーティングすることができていません。もし VPN を有効しているのであれば、VPN を無効にする、あるいは VM の IP アドレスに再ルーティングしないように設定してください。もし VPN を使用していないのであれば、 VM 環境のルーティング周りのオプションを確認してください", + "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "今の minikube の設定はサポートされていないドライバーを参照しています。 ~/.minikube を削除して、もう一度試してください", + "Your minikube vm is not running, try minikube start.": "minikube の VM が動いていません。以下のコマンドを試してみてください。 minikube start", + "[{{.id}}] {{.msg}} {{.error}}": "[{{.id}}] {{.msg}} {{.error}}", + "adding node": "ノードを追加しています", + "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "「 {{.name}} 」アドオンは現在無効になっています。\n有効にするためには、以下のコマンドを実行してくだいさい。 \nminikube addons enable {{.name}}", + "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "「 {{.name}} 」アドオンは minikube では有効なアドオンではありません。\n利用可能なアドオンの一覧を表示するためには、以下のコマンドを実行してください。 \nminikube addons list", + "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "addons では以下のようにサブコマンドを使用することで、 minikube のアドオンのファイルを編集することができます。 \"minikube addons enable dashboard\"", + "bash completion failed": "bash の補完が失敗しました", + "call with cleanup=true to remove old tunnels": "cleanup=true で呼び出すことで、古い tunnel を削除することができます", + "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "config では以下のようにサブコマンドを使用して、minikube の設定ファイルを編集することができます。 \"minikube config set driver kvm\"\n設定可能なフィールドは以下です。\\n\\n", + "config view failed": "設定を表示するのに失敗しました", + "dashboard service is not running: {{.error}}": "ダッシュボードのサービスが動いていません。 {{.error}}", + "deleting node": "ノードを削除しています", + "disable failed": "無効にするのに失敗しました", + "dry-run mode. Validates configuration, but does not mutate system state": "dry-run モードです。設定は検証しますが、実際にシステムの状態を変更することはしません", + "dry-run validation complete!": "dry-run の検証が終了しました", + "enable failed": "有効にするのに失敗しました", + "error creating clientset": "Clientset を作成する際にエラーが発生しました", + "error getting primary control plane": "コントロールプレーンを取得する際にエラーが発生しました", + "error getting ssh port": "SSH のポートを取得する際にエラーが発生しました", + "error parsing the input ip address for mount": "マウント用の入力された IP アドレスをパースする際にエラーが発生しました", + "error starting tunnel": "tunnel を開始する際にエラーが発生しました", + "error stopping tunnel": "tunnel を停止する際にエラーが発生しました", + "error: --output must be 'yaml' or 'json'": "エラーです。 --output は「 yaml 」、あるいは「 json 」である必要があります", + "failed to open browser: {{.error}}": "ブラウザを起動するのに失敗しました。 {{.error}}", + "if true, will embed the certs in kubeconfig.": "有効であれば、Kubernetes の設定ファイルに証明書を埋め込みます", + "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "minikube のプロフィールを作成する場合は、以下のコマンドで作成できます。 minikube start -p {{.profile_name}}", + "initialization failed, will try again: {{.error}}": "初期化が失敗しました。再施行します。 {{.error}}", + "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "kubeadm が他のプロセス(おそらくローカルでの他の Kubernetes をインストールするプロセス)との TCP ポートでの衝突を検知しました。 lsof -p\u003cport\u003e を実行して、そのプロセスを Kill してください", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl と minikube の構成は {{.home_folder}} に保存されます", - "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", - "kubectl proxy": "", - "loading config": "", - "logdir set failed": "", - "machine '{{.name}}' does not exist. 
Proceeding ahead with recreating VM.": "", - "max time to wait per Kubernetes core services to be healthy.": "", - "minikube addons list --output OUTPUT. json, list": "", - "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", - "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", - "minikube profile was successfully set to {{.profile_name}}": "", - "minikube status --output OUTPUT. json, text": "", - "minikube {{.version}} is available! Download it: {{.url}}": "", - "mkcmp is used to compare performance of two minikube binaries": "", - "mount argument \"{{.value}}\" must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "", - "mount failed": "", - "namespaces to pause": "", - "namespaces to unpause": "", - "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", - "pause containers": "", - "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", - "reload cached images.": "", + "kubectl proxy": "kubectl proxy", + "libmachine failed": "libmachine が失敗しました", + "logdir set failed": "logdir の値を設定するのに失敗しました", + "max time to wait per Kubernetes core services to be healthy.": "Kubernetes の core サービスが正常に稼働するまで待つ最大時間", + "minikube addons list --output OUTPUT. json, list": "minikube addons list --output OUTPUT. json, list", + "minikube is exiting due to an error. If the above message is not useful, open an issue:": "minikube がエラーで終了しました。もし上のメッセージが不十分であれば、Issue を作成してください", + "minikube is not yet compatible with ChromeOS": "minikube はまだ ChromeOS と互換性がありません", + "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "minikube が Google Container Registry に接続できません。 HTTP プロキシを使用するように設定する必要があるかもしれません", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "minikube が VM に接続できませんでした。 {{.error}}\n\n\t考えられる理由は以下の二つです。\n\n\t- VPN 、あるいはファイアウォールによる干渉\n\t- {{.hypervisor}} のネットワークの設定での問題\n\n\t迂回策には以下があります。\n\n\t- ローカルの VPN 、あるいはファイアウォールを無効にする\n\t- {{.ip}} へのアクセスを許可するようにローカルの VPN 、あるいはファイアウォールを設定する\n\t- {{.hypervisor}} を再起動、あるいは再インストールする\n\t- 別の VM ドライバーを使用する\n\t- --force を使用してこの接続チェックを上書きする", + "minikube profile was successfully set to {{.profile_name}}": "{{.profile_name}} の値が minikube のプロフィールに正常に設定されました", + "minikube status --output OUTPUT. json, text": "minikube status --output OUTPUT. 
json, text", + "minikube {{.version}} is available! Download it: {{.url}}": "minikube {{.version}} が利用可能です! 以下のURLでダウンロードできます。 {{.url}}", + "mkcmp is used to compare performance of two minikube binaries": "mkcmp で二つの minikube のバイナリのパフォーマンスを比較することができます", + "mount argument \"{{.value}}\" must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "mount への引数 \"{{.value}}\" は以下のフォーマットである必要があります。\u003cソースディレクトリ\u003e:\u003cターゲットディレクトリ\u003e", + "mount failed": "マウントが失敗しました", + "namespaces to pause": "停止する名前空間", + "namespaces to unpause": "停止を解除する名前空間", + "none driver does not support multi-node clusters": "マルチクラスターをサポートしているドライバーがありません", + "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "引数({{.ArgCount}})が少なすぎます。\\n使用方法: minikube config set PROPERTY_NAME PROPERTY_VALUE", + "pause containers": "コンテナを停止させます", + "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "profile で現在の minikube のプロフィールの値を設定することができます。profil に引数を渡さなければ、現在のプロフィールを見ることができます。このコマンドは複数の minikube インスタンスを管理するのに使用されます。「 minikube profile default 」で minikube のデフォルトのプロフィールを見ることができます", + "reload cached images.": "キャッシュしていたイメージから再読み込みをします", "reloads images previously added using the 'cache add' subcommand": "", - "retrieving node": "", - "service {{.namespace_name}}/{{.service_name}} has no node port": "", - "stat failed": "", - "status json failure": "", - "status text failure": "", - "toom any arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", - "tunnel creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. for a detailed example see https://minikube.sigs.k8s.io/docs/tasks/loadbalancer": "", - "tunnel makes services of type LoadBalancer accessible on localhost": "", - "unable to bind flags": "", - "unable to delete minikube config folder": "", - "unable to set logtostderr": "", - "unpause Kubernetes": "", - "unset failed": "", - "unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables": "", - "unsets an individual value in a minikube config file": "", - "unsupported or missing driver: {{.name}}": "", - "update config": "", - "usage: minikube addons configure ADDON_NAME": "", - "usage: minikube addons disable ADDON_NAME": "", - "usage: minikube addons enable ADDON_NAME": "", - "usage: minikube addons list": "", - "usage: minikube addons open ADDON_NAME": "", - "usage: minikube config unset PROPERTY_NAME": "", - "usage: minikube delete": "", - "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", - "zsh completion failed": "", - "{{.driver}} does not appear to be installed": "", - "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", - "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", - "{{.name}} has no available configuration options": "", - "{{.name}} is already running": "", - "{{.name}} was successfully configured": "", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. 
You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "retrieving node": "ノードを取得しています", + "saving node": "ノードを保存しています", + "service {{.namespace_name}}/{{.service_name}} has no node port": "サービス {{.namespace_name}}/{{.service_name}} は NodePort を持っていません", + "startup failed": "起動に失敗しました", + "stat failed": "stat が失敗しました", + "status json failure": "ステータスは JSON エラーです", + "status text failure": "ステータスはテキストエラーです", + "toom any arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "引数の数({{.ArgCount}})が多すぎます。\\n使用方法: minikube config set PROPERTY_NAME PROPERTY_VALUE", + "tunnel creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. for a detailed example see https://minikube.sigs.k8s.io/docs/tasks/loadbalancer": "tunnel によってタイプが LoadBalancer なサービスへのルーティングが作成され、Ingress をサービスの ClusterIP へと向けさせます。より詳細な例は以下を参照してください。https://minikube.sigs.k8s.io/docs/tasks/loadbalancer", + "tunnel makes services of type LoadBalancer accessible on localhost": "tunnel によってタイプが LoadBalancer なサービスが localhost からアクセス可能になります", + "unable to bind flags": "フラグをバインドすることができませんでした", + "unable to delete minikube config folder": "minikube の設定フォルダーを削除できませんでした", + "unable to set logtostderr": "logtostderr を設定することができませんでした", + "unpause Kubernetes": "Kubernetes を再開させます", + "unset failed": "取り消しが失敗しました", + "unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables": "minikube の設定ファイルから PROPERTY_NAME の値を取り消します。フラグ、あるいは環境変数で上書き可能です", + "unsets an individual value in a minikube config file": "minikube の設定ファイルの個々の値を取り消します", + "unsupported or missing driver: {{.name}}": "サポートしていない、あるいは不足しているドライバーです: {{.name}}", + "update config": "設定を更新します", + "usage: minikube addons configure ADDON_NAME": "使用方法: minikube addons configure ADDON_NAME", + "usage: minikube addons disable ADDON_NAME": "使用方法: minikube addons disable ADDON_NAME", + "usage: minikube addons enable ADDON_NAME": "使用方法: minikube addons enable ADDON_NAME", + "usage: minikube addons list": "使用方法: minikube addons list", + "usage: minikube addons open ADDON_NAME": "使用方法: minikube addons open ADDON_NAME", + "usage: minikube config unset PROPERTY_NAME": "使用方法: minikube config unset PROPERTY_NAME", + "usage: minikube delete": "使用方法: minikube delete", + "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "使用方法: minikube profile [MINIKUBE_PROFILE_NAME]", + "version json failure": "JSON でバージョンを表示するのに失敗しました", + "version yaml failure": "YAML でバージョンを表示するのに失敗しました", + "zsh completion failed": "zsh の補完が失敗しました", + "{{ .name }}: {{ .rejection }}": "{{ .name }}: {{ .rejection }}", + "{{.cluster}} IP has been updated to point at {{.ip}}": "{{.cluster}} の IP アドレスは {{.ip}} へと更新されました", + "{{.cluster}} IP was already correctly configured for {{.ip}}": "{{.cluster}} の IP アドレスは {{.ip}} としてすでに正常に設定されています", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "{{.driver_name}} 「 {{.cluster}} 」 {{.machine_type}} がありません。再生成します。", + "{{.driver}} does not appear to be installed": "{{.driver}} がインストールされていないようです", + "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "{{.driver}} がインストールされていないようですが、既存のプロフィールから指定されています。「 minikube delete 」を実行、あるいは {{.driver}} をインストールしてください", + "{{.extra_option_component_name}}.{{.key}}={{.value}}": "{{.extra_option_component_name}}.{{.key}}={{.value}}", + "{{.name}} has no available configuration options": "{{.name}} には利用可能なオプションがありません", + "{{.name}} is already running": "{{.name}} はすでに起動しています", + "{{.name}} was successfully configured": "{{.name}} は正常に設定されました", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "{{.path}} のバージョンは {{.client_version}}です。 {{.cluster_version}} の Kubernetes とは互換性がないかもしれません", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上の {{.prefix}}minikube {{.version}}", - "{{.type}} is not yet a supported filesystem. We will try anyways!": "", - "{{.url}} is not accessible: {{.error}}": "" + "{{.type}} is not yet a supported filesystem. We will try anyways!": "{{.type}} はまだサポートされていないファイルシステムです。とにかくやってみます!", + "{{.url}} is not accessible: {{.error}}": "{{.url}} はアクセス可能ではありません。 {{.error}}" } \ No newline at end of file diff --git a/translations/ko.json b/translations/ko.json index 784b8736a1..0024eb688e 100644 --- a/translations/ko.json +++ b/translations/ko.json @@ -1,5 +1,7 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "\"The '{{.minikube_addon}}' 이 비활성화되었습니다", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.name}}\" profile does not exist": "\"{{.name}}\" 프로필이 존재하지 않습니다", "\"{{.name}}\" profile does not exist, trying anyways.": "\"{{.name}}\" 프로필이 존재하지 않습니다, 그럼에도 불구하고 시도합니다", "\"{{.node_name}}\" stopped.": "\"{{.node_name}}\" 이 중단되었습니다", @@ -11,9 +13,8 @@ "'none' driver does not support 'minikube ssh' command": "'none' 드라이버는 'minikube ssh' 커맨드를 지원하지 않습니다", "'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 드라이버가 다음 이슈를 기록하였습니다: {{.error}}", "'{{.profile}}' is not running": "'{{.profile}}' 이 실행 중이지 않습니다", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", @@ -29,14 +30,15 @@ "Adds a node to the given cluster config, and starts it.": "노드 하나를 주어진 클러스터 컨피그에 추가하고 시작합니다", "Adds a node to the given cluster.": "노드 하나를 주어진 클러스터에 추가합니다", "Advanced Commands:": "고급 커맨드:", - "After minikube restart the dockerd ports might have changed.
To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "", "Allow user prompts for more information": "많은 정보를 위해 사용자 프롬프트를 허가합니다", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "", - "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: [], where unit = b, k, m or g)", + "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "minikube 가상 머신에 할당할 RAM 의 용량 (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "", "Amount of time to wait for service in seconds": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "VirtualBox 와 같은 또 다른 하이퍼바이저가 KVM 과 충돌이 발생합니다. 다른 하이퍼바이저를 중단하거나 --driver 로 변경하세요", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the {{.driver}} driver": "자동적으로 {{.driver}} 드라이버가 선택되었습니다", "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "자동적으로 {{.driver}} 드라이버가 선택되었습니다. 다른 드라이버 목록: {{.alternates}}", "Available Commands": "사용 가능한 커맨드", @@ -44,14 +46,17 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "apiserver 가 API 요청을 서비스할 때까지 막습니다", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "마운트하기 위한 디렉토리 {{.path}} 를 찾을 수 없습니다", "Cannot use both --output and --format options": "--output 과 --format 옵션을 함께 사용할 수 없습니다", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "minikube 가 실행 중인지 그리고 정확한 네임스페이스를 (-n 플래그로) 명시하였는지 확인하세요", "Check that the provided apiserver flags are valid": "주어진 apiserver 플래그가 유효한지 확인하세요", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "입력한 --kubernetes-version 이 'v'로 시작하는지 확인하세요. 예시: 'v1.1.14'", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. 
If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "환경 설정 및 관리 커맨드:", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -62,9 +67,10 @@ "Could not process error from failed deletion": "", "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 에 쿠버네티스를 설치하는 중 ...", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 를 생성하는 중 ...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 를 생성하는 중 ...", "DEPRECATED, use `driver` instead.": "DEPRECATED 되었습니다, 'driver' 를 사용하세요", "Default group id used for the mount": "마운트를 위한 디폴트 group id", "Default user id used for the mount": "마운트를 위한 디폴트 user id", @@ -89,10 +95,11 @@ "Documentation: {{.url}}": "문서: {{.url}}", "Done! kubectl is now configured to use \"{{.name}}\"": "끝났습니다! 이제 kubectl 이 \"{{.name}}\" 를 사용할 수 있도록 설정되었습니다", "Download complete!": "다운로드가 성공하였습니다!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "가상 머신 부트 이미지 다운로드 중 ...", "Downloading driver {{.driver}}:": "드라이버 {{.driver}} 다운로드 중 :", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "{{.name}} {{.version}} 다운로드 중", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "registry-creds-acr` secret 생성 오류", "ERROR creating `registry-creds-dpr` secret": "`registry-creds-dpr` secret 생성 오류", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "`registry-creds-ecr` secret 생성 오류: {{.error}}", @@ -101,7 +108,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). 
Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", @@ -128,19 +134,16 @@ "Error getting cluster bootstrapper": "클러스터 부트스트래퍼 조회 오류", "Error getting cluster config": "클러스터 컨피그 조회 오류", "Error getting config": "컨피그 조회 오류", - "Error getting control plane": "", "Error getting host": "호스트 조회 오류", "Error getting host IP": "호스트 IP 조회 오류", "Error getting host status": "호스트 상태 조회 오류", "Error getting machine logs": "머신 로그 조회 오류", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", "Error getting service status": "서비스 상태 조회 오류", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "ssh 클라이언트 조회 오류", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", "Error loading api": "api 로딩 오류", "Error loading profile config": "프로필 컨피그 로딩 오류", @@ -148,15 +151,14 @@ "Error opening service": "", "Error parsing minikube version: {{.error}}": "minikube 버전 파싱 오류: {{.error}}", "Error reading {{.path}}: {{.error}}": "", - "Error retrieving node": "", "Error starting cluster": "클러스터 시작 오류", "Error starting mount": "마운트 시작 오류", "Error starting node": "노드 시작 오류", "Error while setting kubectl current context : {{.error}}": "kubectl current context 설정 오류 : {{.error}}", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", - "Error: [{{.id}}] {{.error}}": "", "Examples": "예시", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. 
(hyperv driver only)": "", @@ -164,17 +166,20 @@ "Failed to cache ISO": "ISO 캐싱에 실패하였습니다", "Failed to cache and load images": "이미지 캐싱 및 로딩에 실패하였습니다", "Failed to cache binaries": "바이너리 캐싱에 실패하였습니다", + "Failed to cache images": "", "Failed to cache images to tar": "이미지를 tar 로 캐싱하는 데 실패하였습니다", "Failed to cache kubectl": "kubectl 캐싱에 실패하였습니다", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "{{.minikube_dir_path}} 의 권한 변경에 실패하였습니다: {{.error}}", "Failed to check if machine exists": "머신이 존재하는지 확인하는 데 실패하였습니다", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "클러스터 제거에 실패하였습니다: {{.error}}", "Failed to delete images": "이미지 제거에 실패하였습니다", "Failed to delete images from config": "컨피그로부터 이미지 제거에 실패하였습니다", "Failed to delete node {{.name}}": "노드 {{.name}} 제거에 실패하였습니다", "Failed to enable container runtime": "컨테이너 런타임 활성화에 실패하였습니다", "Failed to generate config": "컨피그 생성에 실패하였습니다", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "부트스트래퍼 조회에 실패하였습니다", "Failed to get command runner": "", "Failed to get driver URL": "드라이버 URL 조회에 실패하였습니다", @@ -192,16 +197,21 @@ "Failed to stop node {{.name}}": "노드 {{.name}} 중지에 실패하였습니다", "Failed to update cluster": "클러스터를 수정하는 데 실패하였습니다", "Failed to update config": "컨피그를 수정하는 데 실패하였습니다", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "마운트 해제에 실패하였습니다: {{.error}}", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "For more information, see:": "더 많은 정보를 보려면, 다음을 참고하세요:", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "", "Found network options:": "네트워크 옵션을 찾았습니다", "Found {{.number}} invalid profile(s) !": "{{.number}} 개의 무효한 프로필을 찾았습니다", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "", @@ -219,6 +229,8 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -233,8 +245,9 @@ "Insecure Docker registries to pass to the Docker daemon. 
The default service CIDR range will automatically be added.": "", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -248,14 +261,15 @@ "Lists the URLs for the services in your local cluster": "", "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", - "Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.": "", - "Minikube is a tool for managing local Kubernetes clusters.": "", + "Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.": "Minikube 는 개발용으로 최적화된 싱글 노드 쿠버네티스 클러스터 제공 및 관리 CLI 툴입니다", + "Minikube is a tool for managing local Kubernetes clusters.": "Minikube 는 로컬 쿠버네티스 클러스터 관리 툴입니다", "Modify minikube config": "", "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "특정 디렉토리를 minikube 에 마운트합니다", @@ -265,17 +279,22 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. 
Using {{.image_repository_name}} as fallback.": "", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", - "Number of CPUs allocated to the minikube VM.": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -294,46 +313,58 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "", - "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "", + "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "쿠버네티스 {{.k8sVersion}} 을 {{.runtime}} {{.runtimeVersion}} 런타임으로 설치하는 중", "Print current and latest version number": "현재 그리고 최신 버전을 출력합니다", + "Print just the version number.": "", "Print the version of minikube": "minikube 의 버전을 출력합니다", "Print the version of minikube.": "minikube 의 버전을 출력합니다.", "Problems detected in {{.entry}}:": "", "Problems detected in {{.name}}:": "", "Profile gets or sets the current minikube profile": "", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "", "Reinstall VirtualBox and reboot. 
Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "", - "Removed all traces of the \"{{.name}}\" cluster.": "", + "Remove the incompatible --docker-opt flag if one was provided": "", + "Removed all traces of the \"{{.name}}\" cluster.": "\"{{.name}}\" 클러스터 관련 정보가 모두 삭제되었습니다", "Removing {{.directory}} ...": "", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "", - "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "", - "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "", "Retrieve the ssh identity key path of the specified cluster.": "", "Retrieves the IP address of the running cluster": "", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "kubectl 을 실행합니다", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", "Run the minikube command as an Administrator": "minikube 명령어를 관리자 권한으로 실행합니다", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "설정이 실패하였습니다", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -348,6 +379,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "죄송합니다, 쿠버네티스 {{.version}} 는 해당 minikube 버전에서 지원하지 않습니다", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "", @@ -359,8 +391,11 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", "Starting node": "노드를 시작하는 중", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다", "Starts a node.": "노드를 시작합니다", @@ -373,14 +408,14 @@ "Successfully added {{.name}} to {{.cluster}}!": "{{.name}} 를 {{.cluster}} 에 성공적으로 추가하였습니다!", "Successfully deleted all profiles": "모든 프로필이 성공적으로 삭제되었습니다", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "타겟 폴더 {{.path}} 는 절대 경로여야 합니다", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "\"{{.driver_name}}\" 드라이버는 root 권한으로 실행되어야 합니다. minikube 를 다음과 같이 실행하세요 'sudo minikube --driver={{.driver_name}}'", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "\"{{.driver_name}}\" 드라이버는 root 권한으로 실행되면 안 됩니다", - "The 'none' driver provides limited isolation and may reduce system security and reliability.": "", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", @@ -395,28 +430,35 @@ "The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "", - "The apiserver name which is used in the generated certificate for kubernetes. 
This can be used if you want to make the apiserver available from outside the machine": "", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", + "The driver '{{.driver}}' is not supported on {{.os}}": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", - "The kubernetes version that the minikube VM will use (ex: v1.2.3)": "", "The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "", "The minikube VM is offline. Please run 'minikube start' to start it again.": "", "The name of the network plugin.": "", "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -424,40 +466,59 @@ "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", + "There is no local cluster named \"{{.cluster}}\"": "", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", - "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "To connect to this cluster, use: kubectl --context={{.name}}": "", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. 
For example, to overwrite your own settings, run:": "", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "무효한 프로필 {{.profile}} 를 삭제하는 중", "Unable to bind flags": "flags 를 합칠 수 없습니다", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "대시보드를 활성화할 수 없습니다", "Unable to fetch latest version info": "최신 버전 정보를 가져올 수 없습니다", + "Unable to find control plane": "", "Unable to generate docs": "문서를 생성할 수 없습니다", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "가상 머신 IP 주소를 조회할 수 없습니다", "Unable to get addon status for {{.name}}: {{.error}}": "", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "현재 사용자를 조회할 수 없습니다", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "런타임을 조회할 수 없습니다", "Unable to get the status of the {{.name}} cluster.": "{{.name}} 클러스터의 상태를 조회할 수 없습니다", "Unable to kill mount process: {{.error}}": "마운트 프로세스를 중지할 수 없습니다: {{.error}}", "Unable to load cached images from config file.": "컨피그 파일로부터 캐시된 이미지를 로드할 수 없습니다", "Unable to load cached images: {{.error}}": "캐시된 이미지를 로드할 수 없습니다: {{.error}}", "Unable to load config: {{.error}}": "컨피그를 로드할 수 없습니다: {{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": " \"{{.kubernetes_version}}\" 를 파싱할 수 없습니다: {{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", + "Unable to remove machine directory": "", "Unable to remove machine directory: %v": "머신 디렉토리를 제거할 수 없습니다: %v", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to start VM. Please investigate and run 'minikube delete' if possible": "가상 머신을 시작할 수 없습니다. 
확인 후 가능하면 'minikube delete' 를 실행하세요", "Unable to stop VM": "가상 머신을 중지할 수 없습니다", "Unable to update {{.driver}} driver: {{.error}}": "{{.driver}} 를 수정할 수 없습니다: {{.error}}", @@ -469,6 +530,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Usage": "", "Usage: minikube completion SHELL": "", @@ -480,7 +542,7 @@ "Usage: minikube node stop [name]": "", "Use \"{{.CommandPath}} [command] --help\" for more information about a command.": "", "Use 'kubect get po -A' to find the correct and namespace name": "", - "Use -A to specify all namespaces": "", + "Use -A to specify all namespaces": "모든 namespace 를 확인하려면 -A 를 사용하세요", "Use VirtualBox to remove the conflicting VM and/or network interfaces": "", "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.": "", "User ID: {{.userID}}": "", @@ -488,10 +550,9 @@ "Userspace file server:": "", "Using image repository {{.name}}": "", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", - "Using the {{.driver}} driver based on existing profile": "", - "Using the {{.driver}} driver based on user configuration": "", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Using the {{.driver}} driver based on existing profile": "기존 프로필에 기반하여 {{.driver}} 드라이버를 사용하는 중", + "Using the {{.driver}} driver based on user configuration": "유저 환경 설정 정보에 기반하여 {{.driver}} 드라이버를 사용하는 중", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "", "Verify the IP address of the running cluster in kubeconfig.": "", "Verifying dashboard health ...": "", @@ -502,33 +563,39 @@ "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Wait failed: {{.error}}": "", "Waiting for cluster to come online ...": "클러스터가 사용 가능하기까지 기다리는 중 ...", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). 
Please see {{.documentation_url}} for more details": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "맞는 버전의 kubectl 을 사용하기 위해서는 다음과 같이 사용 가능합니다. minikube kubectl -- get pods'", "You can delete them using the following command(s):": "다음 커맨드(들)을 사용하여 제거할 수 있습니다", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "service 이름을 명시해야 합니다", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "호스트가 KVM 가상화를 지원하지 않습니다. qemu-kvm 이 설치되었는지 확인 후, 문제 디버그를 위해 'virt-host-validate' 를 실행하세요", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "호스트가 가상화를 지원하지 않습니다. 가상 머신 안에서 minikube 를 실행 중인 경우, '--driver=none' 로 시도하세요. 그렇지 않다면, BIOS 에서 가상화를 활성화하세요", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "minikube config 가 미지원 드라이버를 참조하고 있습니다. ~/.minikube 를 제거한 후, 다시 시도하세요", "Your minikube vm is not running, try minikube start.": "minikube 가상 머신이 실행 중이 아닙니다, minikube start 를 시도하세요", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "bash 자동 완성이 실패하였습니다", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "config view 가 실패하였습니다", "creating api client": "api 클라이언트 생성 중", "dashboard service is not running: {{.error}}": "대시보드 서비스가 실행 중이지 않습니다: {{.error}}", + "deleting node": "", "disable failed": "비활성화가 실패하였습니다", "dry-run mode. 
Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "dry-run 검증 완료!", @@ -540,39 +607,44 @@ "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "", "getting config": "컨피그 조회 중", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "프로필을 생성하려면 다음 커맨드를 입력하세요: minikube start -p {{.profile_name}}\"", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 과 minikube 환경 정보는 {{.home_folder}} 에 저장될 것입니다", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl 이 PATH 에 없습니다, 하지만 이는 대시보드에서 필요로 합니다. 설치 가이드:https://kubernetes.io/docs/tasks/tools/install-kubectl/", "kubectl proxy": "kubectl 프록시", + "libmachine failed": "", "loading config": "컨피그 로딩 중", "logdir set failed": "logdir 설정이 실패하였습니다", "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "머신 '{{.name}}' 이 존재하지 않습니다. 진행하기 앞서 가상 머신을 재생성합니다", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", - "minikube {{.version}} is available! Download it: {{.url}}": "", + "minikube {{.version}} is available! Download it: {{.url}}": "minikube {{.version}} 이 사용가능합니다! 
다음 경로에서 다운받으세요: {{.url}}", "mkcmp is used to compare performance of two minikube binaries": "", "mount argument \"{{.value}}\" must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "", "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -596,18 +668,20 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", - "zsh completion failed": "", + "version json failure": "", + "version yaml failure": "", + "zsh completion failed": "zsh 완성이 실패하였습니다", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "{{.driver}} 가 설치되지 않았습니다", "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", "{{.name}} cluster does not exist": "{{.name}} 클러스터가 존재하지 않습니다", "{{.name}} has no available configuration options": "{{.driver}} 이 사용 가능한 환경 정보 옵션이 없습니다", "{{.name}} is already running": "{{.driver}} 이 이미 실행 중입니다", "{{.name}} was successfully configured": "{{.driver}} 이 성공적으로 설정되었습니다", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", - "{{.prefix}}minikube {{.version}} on {{.platform}}": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "{{.path}} 의 버전은 v{{.client_version}} 이므로, 쿠버네티스 버전 v{{.cluster_version}} 과 호환되지 않을 수 있습니다", + "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}{{.platform}} 위의 minikube {{.version}}", "{{.type}} is not yet a supported filesystem. 
We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": "{{.url}} 이 접근 불가능합니다: {{.error}}" } \ No newline at end of file diff --git a/translations/pl.json b/translations/pl.json index 2cfee9621b..a59064b72a 100644 --- a/translations/pl.json +++ b/translations/pl.json @@ -1,11 +1,11 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.minikube_addon}}\" was successfully disabled": "\"{{.minikube_addon}}\" został wyłaczony", "\"{{.name}}\" profile does not exist": "Profil \"{{.name}}\" nie istnieje", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "Maszyna wirtualna \"{{.profile_name}}\" nie istnieje. Nie można zatrzymać", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "Profil \"{{.profile_name}}\" nie istnieje. Nie można wyświetlić adresu IP ", "\"{{.profile_name}}\" stopped.": "Zatrzymano \"{{.profile_name}}\"", "'none' driver does not support 'minikube docker-env' command": "sterownik 'none' nie wspiera komendy 'minikube docker-env'", @@ -13,10 +13,8 @@ "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "sterownik 'none' nie wspiera komendy 'minikube ssh'", "'{{.driver}}' driver reported an issue: {{.error}}": "", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "", - "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "", "A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "", "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", @@ -32,30 +30,32 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "Zaawansowane komendy", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "Aliasy", "Allow user prompts for more information": "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. 
For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "Ilość zarezerwowanej pamieci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or )", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "Ilość zarezerwowanej pamieci RAM dla maszyny wirtualnej minikube (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or )", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "Czas oczekiwania na serwis w sekundach", "Amount of time to wait for service in seconds": "Czas oczekiwania na servis w sekundach", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", + "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the {{.driver}} driver": "", "Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}": "", "Available Commands": "Dostępne polecenia", "Basic Commands:": "Podstawowe polecenia", "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", - "Block until the apiserver is servicing API requests": "", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "Nie można odnoleść folderu {{.path}} do zamontowania", "Cannot use both --output and --format options": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "Upewnij się że minikube zpstało uruchomione i że podano poprawną przestrzeń nazw(-n flag) celem zamontowania", - "Check that the provided apiserver flags are valid": "", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "Upewnij się że --kubernetes-version ma 'v' z przodu. Na przykład `v1.1.14`", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "Polecenia konfiguracji i zarządzania", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "", @@ -68,10 +68,11 @@ "Could not process errors from failed deletion": "", "Country code of the image mirror to be used. 
Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", "Created a new profile : {{.profile_name}}": "Stworzono nowy profil : {{.profile_name}}", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating a new profile failed": "Tworzenie nowego profilu nie powiodło się", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Tworzenie {{.driver_name}} (CPUs={{.number_of_cpus}}, Pamięć={{.memory_size}}MB, Dysk={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "Domyślne id groupy użyte dla montowania", "Default user id used for the mount": "Domyślne id użytkownia użyte dla montowania ", @@ -98,10 +99,11 @@ "Done! kubectl is now configured to use \"{{.name}}": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".", "Done! kubectl is now configured to use \"{{.name}}\"": "Gotowe! kubectl jest skonfigurowany do użycia z \"{{.name}}\".", "Download complete!": "Pobieranie zakończone!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "Pobieranie obrazu maszyny wirtualnej ...", "Downloading driver {{.driver}}:": "", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "Pobieranie {{.name}} {{.version}}", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "", @@ -110,7 +112,6 @@ "Enable addons. see `minikube addons list` for a list of valid addon names.": "", "Enable experimental NVIDIA GPU support in minikube": "aktywuj eksperymentalne wsparcie minikube dla NVIDIA GPU", "Enable host resolver for NAT DNS requests (virtualbox driver only)": "", - "Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.": "", "Enable proxy for NAT DNS requests (virtualbox driver only)": "", "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \\\"--network-plugin=cni\\\".": "", "Enables the addon w/ADDON_NAME within minikube (example: minikube addons enable dashboard). For a list of available addons use: minikube addons list": "", @@ -122,7 +123,6 @@ "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "", "Environment variables to pass to the Docker daemon. 
(format: key=value)": "Zmienne środowiskowe do przekazania do demona docker (format: klucz-wartość)", - "Error adding node to cluster": "", "Error checking driver version: {{.error}}": "Błąd podczas sprawdzania wersij sterownika : {{.error}}", "Error creating minikube directory": "", "Error creating view template": "", @@ -131,46 +131,30 @@ "Error finding port for mount": "", "Error generating set output": "", "Error generating unset output": "", - "Error getting IP": "", - "Error getting client": "", - "Error getting client: {{.error}}": "", - "Error getting cluster": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", - "Error getting config": "", - "Error getting control plane": "", "Error getting host": "", - "Error getting host IP": "", - "Error getting host status": "", - "Error getting machine logs": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", - "Error getting service status": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", - "Error host driver ip status": "", "Error killing mount process": "", - "Error loading api": "", - "Error loading profile config": "", "Error loading profile config: {{.error}}": "", "Error opening service": "", "Error parsing Driver version: {{.error}}": "Błąd parsowania wersji Driver: {{.error}}", "Error parsing minikube version: {{.error}}": "Bład parsowania wersji minikube: {{.error}}", "Error reading {{.path}}: {{.error}}": "Błąd odczytu {{.path}} {{.error}}", "Error restarting cluster": "Błąd podczas restartowania klastra", - "Error retrieving node": "", "Error setting shell variables": "Błąd podczas ustawiania zmiennych powłoki(shell)", "Error starting cluster": "Błąd podczas uruchamiania klastra", "Error starting mount": "", - "Error starting node": "", "Error while setting kubectl current context : {{.error}}": "Błąd podczas ustawiania kontekstu kubectl: {{.error}}", "Error writing mount pid": "", - "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}\"": "", "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. 
Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "Erreur : Vous avez sélectionné Kubernetes v{{.new}}, mais le cluster existent pour votre profil exécute Kubernetes v{{.old}}. Les rétrogradations non-destructives ne sont pas compatibles. Toutefois, vous pouvez poursuivre le processus en réalisant l'une des trois actions suivantes :\n* Créer à nouveau le cluster en utilisant Kubernetes v{{.new}} – exécutez \"minikube delete {{.profile}}\", puis \"minikube start {{.profile}} --kubernetes-version={{.new}}\".\n* Créer un second cluster avec Kubernetes v{{.new}} – exécutez \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\".\n* Réutiliser le cluster existent avec Kubernetes v{{.old}} ou version ultérieure – exécutez \"minikube start {{.profile}} --kubernetes-version={{.old}}\".", - "Error: [{{.id}}] {{.error}}": "", "Examples": "Przykłady", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'": "", "Exiting": "", "Exiting.": "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)": "", @@ -178,21 +162,21 @@ "Failed to cache ISO": "", "Failed to cache and load images": "", "Failed to cache binaries": "", + "Failed to cache images": "", "Failed to cache images to tar": "", "Failed to cache kubectl": "", "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "Nie udało się zmienić uprawnień pliku {{.minikube_dir_path}}: {{.error}}", - "Failed to check if machine exists": "", "Failed to check main repository and mirrors for images for images": "", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "", "Failed to delete images": "", "Failed to delete images from config": "", - "Failed to delete node {{.name}}": "", "Failed to download kubectl": "Pobieranie kubectl nie powiodło się", "Failed to enable container runtime": "", "Failed to generate config": "", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "", "Failed to get command runner": "", - "Failed to get driver URL": "", "Failed to get image map": "", "Failed to get machine client": "", "Failed to get service URL: {{.error}}": "", @@ -204,27 +188,29 @@ "Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "", "Failed to setup certs": "Konfiguracja certyfikatów nie powiodła się", "Failed to setup kubeconfig": "Konfiguracja kubeconfig nie powiodła się", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "Aktualizacja klastra nie powiodła się", "Failed to update config": "Aktualizacja konfiguracji nie powiodła się", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "", "File permissions used for the mount": "", + "Filter to use only VM Drivers": "", "Flags": "", "Follow": "", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", - "For more information, see:": "", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "", "Force minikube to perform possibly dangerous operations": "Wymuś wykonanie potencjalnie niebezpiecznych operacji", "Found network options:": "Wykryto opcje sieciowe:", "Found {{.number}} invalid profile(s) !": "Wykryto {{.number}} nieprawidłowych profili ! ", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "", "Gets the logs of the running instance, used for debugging minikube, not user code.": "Pobiera logi z aktualnie uruchomionej instancji. Przydatne do debugowania kodu który nie należy do aplikacji użytkownika", "Gets the status of a local kubernetes cluster": "Pobiera aktualny status klastra kubernetesa", "Gets the status of a local kubernetes cluster.\n\tExit status contains the status of minikube's VM, cluster and kubernetes encoded on it's bits in this order from right to left.\n\tEg: 7 meaning: 1 (for minikube NOK) + 2 (for cluster NOK) + 4 (for kubernetes NOK)": "", "Gets the value of PROPERTY_NAME from the minikube config file": "", - "Getting machine config failed": "", "Global Flags": "", "Go template format string for the cache list output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#CacheListTemplate": "", "Go template format string for the config view output. The format for Go templates can be found here: https://golang.org/pkg/text/template/\nFor the list of accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", @@ -235,6 +221,8 @@ "Hyperkit is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. 
Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -251,6 +239,8 @@ "Install the latest hyperkit binary, and run 'minikube delete'": "", "Invalid size passed in argument: {{.error}}": "Nieprawidłowy rozmiar przekazany w argumencie: {{.error}}", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "", "Kubernetes {{.version}} is not supported by this release of minikube": "", @@ -266,6 +256,7 @@ "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "", "Location of the minikube iso": "Ścieżka do obrazu iso minikube", "Location of the minikube iso.": "Ścieżka do obrazu iso minikube", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "Zaloguj się i wykonaj polecenie w maszynie za pomocą ssh. Podobne do 'docker-machine ssh'", "Message Size: {{.size}}": "", @@ -273,6 +264,7 @@ "Minikube is a tool for managing local Kubernetes clusters.": "", "Modify minikube config": "", "Modify minikube's kubernetes addons": "", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "Montuje podany katalog wewnątrz minikube", @@ -282,18 +274,24 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "", "None of the known repositories is accessible. 
Consider specifying an alternative image repository with --image-repository flag": "", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "Liczba procesorów przypisana do maszyny wirtualnej minikube", "Number of CPUs allocated to the minikube VM.": "Liczba procesorów przypisana do maszyny wirtualnej minikube", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -312,48 +310,59 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "Proszę upewnij się, że serwis którego szukasz znajduje się w prawidłowej przestrzeni nazw", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "Proszę zaktualizować '{{.driver_executable}}'. {{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "przygowowywanie Kubernetesa {{.k8sVersion}} na {{.runtime}} {{.runtimeVersion}}...", "Print current and latest version number": "Wyświetl aktualna i najnowszą wersję", + "Print just the version number.": "", "Print the version of minikube": "Wyświetl wersję minikube", "Print the version of minikube.": "Wyświetl wersję minikube", "Problems detected in {{.entry}}:": "Wykryto problem w {{.name}}", "Problems detected in {{.name}}:": "Wykryto problem w {{.name}}", "Profile gets or sets the current minikube profile": "Pobiera lub ustawia aktywny profil minikube", - "Profile name \"{{.profilename}}\" is minikube keyword. To delete profile use command minikube delete -p \u003cprofile name\u003e": "", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "", + "Pulling base image ...": "", "Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "Uruchom ponownie komputer aby zakończyć instalacje VirtualBox'a i upewnij się że nie jest on blokowany przez twój system", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "", "Rebuild libvirt with virt-network support": "", "Received {{.name}} signal": "", - "Reconfiguring existing host ...": "", "Registry mirrors to pass to the Docker daemon": "", "Reinstall VirtualBox and reboot. 
Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "Powiązane problemy", + "Remove the incompatible --docker-opt flag if one was provided": "", "Removed all traces of the \"{{.name}}\" cluster.": "", "Removing {{.directory}} ...": "", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "", - "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "", - "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra", "Retrieve the ssh identity key path of the specified cluster.": "Pozyskuje ścieżkę do klucza ssh dla wyspecyfikowanego klastra.", "Retrieves the IP address of the running cluster": "Pobiera adres IP aktualnie uruchomionego klastra", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "Pobiera adres IP aktualnie uruchomionego klastra i wypisuje go do STDOUT", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "Uruchamia kubectl", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. 
Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "", "Set this flag to delete the '.minikube' folder from your user directory.": "", @@ -368,6 +377,7 @@ "Show only log entries which point to known problems": "Pokaż logi które wskazują na znane problemy", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "Zignorowano zmianę kontekstu kubectl ponieważ --keep-context zostało przekazane", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "", @@ -379,8 +389,10 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "Uruchamianie lokalnego klastra kubernetesa", "Starts a node.": "", @@ -393,17 +405,16 @@ "Successfully added {{.name}} to {{.cluster}}!": "", "Successfully deleted all profiles": "", "Successfully mounted {{.sourcePath}} to {{.destinationPath}}": "Pomyślnie zamontowano {{.sourcePath}} do {{.destinationPath}}", - "Successfully powered off Hyper-V. minikube driver -- {{.driver}}": "", "Successfully purged minikube directory located at - [{{.minikubeDirectory}}]": "", "Suggestion: {{.advice}}": "Sugestia: {{.advice}}", "Suggestion: {{.fix}}": "", "Target directory {{.path}} must be an absolute path": "", "The \"{{.cluster_name}}\" cluster has been deleted.": "Klaster \"{{.cluster_name}}\" został usunięty", - "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}'.": "Sterownik \"{{.driver_name}}\" wymaga uprawnień root'a. Użyj 'sudo minikube --vm-driver={{.driver_name}}'", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", "The \"{{.name}}\" cluster has been deleted.": "Klaster \"{{.name}}\" został usunięty", - "The 'none' driver provides limited isolation and may reduce system security and reliability.": "", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The '{{.addonName}}' addon is enabled": "", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "", "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", @@ -418,17 +429,21 @@ "The VM driver exited with an error, and may be corrupt. 
Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "API nasłuchuje na porcie:", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "Domena dns clastra użyta przez kubernetesa", "The container runtime to be used (docker, crio, containerd)": "Runtime konteneryzacji (docker, crio, containerd).", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used.": "", "The docker service is currently not active": "Serwis docker jest nieaktywny", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "Sterownik '{{.driver}} jest niewspierany przez system {{.os}}", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "", "The initial time interval for each check that wait performs in seconds": "", @@ -440,10 +455,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -452,43 +470,60 @@ "The value passed to --format is invalid: {{.error}}": "Wartość przekazana do --format jest nieprawidłowa: {{.error}}", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} nie powinien byc używany z przywilejami root'a.", + "There is no local cluster named \"{{.cluster}}\"": "", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", - "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "To connect to this cluster, use: kubectl --context={{.name}}": "Aby połączyć się z klastrem użyj: kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "Aby połaczyć się z klastem uzyj: kubectl --context={{.profile_name}}", "To disable this notice, run: 'minikube config set WantUpdateNotification false'": "Aby wyłączyć te notyfikację, użyj: 'minikube config set WantUpdateNotification false'", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To start minikube with HyperV Powershell must be in your PATH`": "Aby uruchomić minikube z HyperV Powershell musi znajdować się w zmiennej PATH", 
"To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "", "Troubleshooting Commands:": "", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "", "Unable to bind flags": "", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", - "Unable to get the status of the {{.name}} cluster.": "", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", "Unable to parse default Kubernetes version from constants: {{.error}}": "", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "", - "Unable to remove machine directory: %v": "", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to start VM": "Nie można uruchomić maszyny wirtualnej", - "Unable to start VM. Please investigate and run 'minikube delete' if possible": "", "Unable to stop VM": "Nie można zatrzymać maszyny wirtualnej", "Unable to update {{.driver}} driver: {{.error}}": "", "Unable to verify SSH connectivity: {{.error}}. 
Will retry...": "", @@ -499,6 +534,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Usage": "", "Usage: minikube completion SHELL": "", @@ -518,11 +554,10 @@ "Userspace file server:": "", "Using image repository {{.name}}": "", "Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!": "", - "Using the running {{.driver_name}} \"{{.profile_name}}\" VM ...": "", "Using the {{.driver}} driver based on existing profile": "", "Using the {{.driver}} driver based on user configuration": "", "VM driver is one of: %v": "Sterownik wirtualnej maszyny to jeden z: %v", - "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "Weryfikuję czy zmienne HTTP_PROXY i HTTPS_PROXY sa ustawione poprawnie", "Verify the IP address of the running cluster in kubeconfig.": "Weryfikuję adres IP działającego klastra w kubeconfig", "Verifying dashboard health ...": "Weryfikuję status dashboardu", @@ -534,63 +569,65 @@ "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", - "Wait failed": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Wait failed: {{.error}}": "", "Waiting for SSH access ...": "Oczekiwanie na połaczenie SSH...", - "Waiting for cluster to come online ...": "", "Waiting for:": "Oczekiwanie na :", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. 
Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "Musisz podać nazwę serwisu", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "Twoje środowisko nie wspiera virtualizacji KVM. Upewnij się że qemu-kvm jest zainstalowane i uruchom 'virt-host-validate' aby rozwiązać problem.", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "Nie udało się otworzyć przeglądarki: {{.error}}", - "getting config": "", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "", "kubectl and minikube configuration will be stored in {{.home_folder}}": "konfiguracja minikube i kubectl będzie przechowywana w katalogu {{.home_dir}}", "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "kubectl nie zostało odnaleźione w zmiennej środowiskowej ${PATH}. 
Instrukcja instalacji: https://kubernetes.io/docs/tasks/tools/install-kubectl/", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", "minikube {{.version}} is available! Download it: {{.url}}": "minikube {{.version}} jest dostępne! Pobierz je z: {{.url}}", @@ -599,14 +636,16 @@ "mount failed": "Montowanie się nie powiodło", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "", "reloads images previously added using the 'cache add' subcommand": "", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -631,18 +670,20 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", "{{.addonName}} was successfully enabled": "{{.addonName}} został aktywowany pomyślnie", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "", "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", "{{.name}} cluster does not exist": "Klaster {{.name}} nie istnieje", "{{.name}} has no available configuration options": "{{.name}} nie posiada opcji configuracji", "{{.name}} is already running": "", "{{.name}} was successfully configured": "{{.name}} skonfigurowano pomyślnie", - "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.prefix}}minikube {{.version}} na {{.platform}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "{{.type}} nie jest wspierany przez system plików. I tak spróbujemy!", "{{.url}} is not accessible: {{.error}}": "" diff --git a/translations/zh-CN.json b/translations/zh-CN.json index 1973fd0f72..7b5d40ad81 100644 --- a/translations/zh-CN.json +++ b/translations/zh-CN.json @@ -1,12 +1,12 @@ { "\"The '{{.minikube_addon}}' addon is disabled": "", + "\"{{.context}}\" context has been updated to point to {{.hostname}}:{{.port}}": "", + "\"{{.machineName}}\" does not exist, nothing to stop": "", "\"{{.minikube_addon}}\" was successfully disabled": "已成功禁用 \"{{.minikube_addon}}\"", "\"{{.name}}\" cluster does not exist. Proceeding ahead with cleanup.": "\"{{.name}}\" 集群不存在,将继续清理", "\"{{.name}}\" profile does not exist": "“{{.name}}”配置文件不存在", "\"{{.name}}\" profile does not exist, trying anyways.": "", - "\"{{.node_name}}\" stopped.": "", "\"{{.profile_name}}\" VM does not exist, nothing to stop": "\"{{.profile_name}}\" 虚拟机不存在,没有什么可供停止的", - "\"{{.profile_name}}\" does not exist, nothing to stop": "", "\"{{.profile_name}}\" host does not exist, unable to show an IP": "\"{{.profile_name}}\" 主机不存在,无法显示其IP", "\"{{.profile_name}}\" stopped.": "\"{{.profile_name}}\" 已停止", "'none' driver does not support 'minikube docker-env' command": "'none' 驱动不支持 'minikube docker-env' 命令", @@ -14,9 +14,8 @@ "'none' driver does not support 'minikube podman-env' command": "", "'none' driver does not support 'minikube ssh' command": "'none' 驱动不支持 'minikube ssh' 命令", "'{{.driver}}' driver reported an issue: {{.error}}": "'{{.driver}}' 驱动程序报告了一个问题: {{.error}}", - "'{{.profile}}' is not running": "", - "- {{.profile}}": "", "A VPN or firewall is interfering with HTTP access to the minikube VM. Alternatively, try a different VM driver: https://minikube.sigs.k8s.io/docs/start/": "VPN 或者防火墙正在干扰对 minikube 虚拟机的 HTTP 访问。或者,您可以使用其它的虚拟机驱动:https://minikube.sigs.k8s.io/docs/start/", + "A firewall is blocking Docker the minikube VM from reaching the image repository. You may need to select --image-repository, or use a proxy.": "", "A firewall is blocking Docker the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网。您可能需要对其进行配置为使用代理", "A firewall is blocking Docker within the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "防火墙正在阻止 minikube 虚拟机中的 Docker 访问互联网,您可能需要对其进行配置为使用代理", "A firewall is interfering with minikube's ability to make outgoing HTTPS requests. 
You may need to change the value of the HTTPS_PROXY environment variable.": "防火墙正在干扰 minikube 发送 HTTPS 请求的能力,您可能需要改变 HTTPS_PROXY 环境变量的值", @@ -37,16 +36,17 @@ "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", "Advanced Commands:": "高级命令:", - "After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.\nPlease re-eval the docker-env command:\n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Aliases": "别名", "Allow user prompts for more information": "允许用户提示以获取更多信息", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \\\"auto\\\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers": "用于从中拉取 docker 镜像的备选镜像存储库。如果您对 gcr.io 的访问受到限制,则可以使用该镜像存储库。将镜像存储库设置为“auto”可让 minikube 为您选择一个存储库。对于中国大陆用户,您可以使用本地 gcr.io 镜像,例如 registry.cn-hangzhou.aliyuncs.com/google_containers", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g)": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)", "Amount of RAM allocated to the minikube VM (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "为 minikube 虚拟机分配的 RAM 容量(格式:\u003c数字\u003e[\u003c单位\u003e],其中单位 = b、k、m 或 g)。", + "Amount of RAM to allocate to Kubernetes (format: \u003cnumber\u003e[\u003cunit\u003e], where unit = b, k, m or g).": "", "Amount of time to wait for a service in seconds": "等待服务的时间(单位秒)", "Amount of time to wait for service in seconds": "等待服务的时间(单位秒)", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --vm-driver to switch to it.": "另外一个管理程序与 KVM 产生了冲突,如 VirtualBox。请停止其他的管理程序", + "Another program is using a file required by minikube. 
If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "", "Automatically selected the '{{.driver}}' driver": "自动选择 '{{.driver}}' 驱动", "Automatically selected the '{{.driver}}' driver (alternates: {{.alternates}})": "自动选择 '{{.driver}}' 驱动(可选项:{{.alternates}})", "Automatically selected the {{.driver}} driver": "", @@ -56,16 +56,19 @@ "Because you are using docker driver on Mac, the terminal needs to be open to run it.": "", "Bind Address: {{.Address}}": "", "Block until the apiserver is servicing API requests": "阻塞直到 apiserver 为 API 请求提供服务", + "Both driver={{.driver}} and vm-driver={{.vmd}} have been set.\n\n Since vm-driver is deprecated, minikube will default to driver={{.driver}}.\n\n If vm-driver is set in the global config, please run \"minikube config unset vm-driver\" to resolve this warning.": "", "Cannot find directory {{.path}} for mount": "找不到用来挂载的 {{.path}} 目录", "Cannot use both --output and --format options": "不能同时使用 --output 和 --format 选项", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "检查 'journalctl -xeu kubelet' 的输出,尝试启动 minikube 时添加参数 --extra-config=kubelet.cgroup-driver=systemd", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "检查 SELinux 是否禁用,且提供的 apiserver 标志是否有效", "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "检测 minikube 是否正在运行,以及是否根据需要指定了正确的 namespace (-n 标志)", "Check that the provided apiserver flags are valid": "检查提供的 apiserver 标志是否有效", + "Check that the provided apiserver flags are valid, and that SELinux is disabled": "", "Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "检测您的 --kubernetes-version 前面是否有 'v', 例如:'v1.1.14", "Check that your apiserver flags are valid, or run 'minikube delete'": "请检查您的 apiserver 标志是否有效,或者允许 'minikube delete'", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --driver=none": "", "Check your firewall rules for interference, and run 'virt-host-validate' to check for KVM configuration issues. If you are running minikube within a VM, consider using --vm-driver=none": "检查您的防火墙规则是否存在干扰,然后运行 'virt-host-validate' 以检查 KVM 配置问题,如果在虚拟机中运行minikube,请考虑使用 --vm-driver=none", + "Choose a smaller value for --memory, such as 2000": "", "Configuration and Management Commands:": "配置和管理命令:", "Configure a default route on this Linux host, or use another --driver that does not require it": "", "Configure a default route on this Linux host, or use another --vm-driver that does not require it": "为当前 Linux 主机配置一个默认的路由, 或者使用另一个不需要他的 --vm-driver", @@ -80,11 +83,12 @@ "Could not process errors from failed deletion": "无法处理删除失败的错误", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "需要使用的镜像镜像的国家/地区代码。留空以使用全球代码。对于中国大陆用户,请将其设置为 cn。", "Created a new profile : {{.profile_name}}": "创建了新的配置文件:{{.profile_name}}", - "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "正在 {{.driver_name}} 容器中 创建 Kubernetes,(CPUs={{.number_of_cpus}}), 内存={{.memory_size}}MB ({{.host_memory_size}}MB 可用", + "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "", "Creating a new profile failed": "创建新的配置文件失败", "Creating mount {{.name}} ...": "正在创建装载 {{.name}}…", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "正在创建 {{.driver_name}} 虚拟机(CPUs={{.number_of_cpus}},Memory={{.memory_size}}MB, Disk={{.disk_size}}MB)...", + "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "DEPRECATED, use `driver` instead.": "", "Default group id used for the mount": "用于挂载默认的 group id", "Default user id used for the mount": "用于挂载默认的 user id", @@ -112,10 +116,11 @@ "Done! kubectl is now configured to use \"{{.name}}\"": "完成!kubectl 已经配置至 \"{{.name}}\"", "Done! kubectl is now configured to use {{.name}}": "完成!kubectl已经配置至{{.name}}", "Download complete!": "下载完成!", + "Downloading Kubernetes {{.version}} preload ...": "", "Downloading VM boot image ...": "正在下载 VM boot image...", "Downloading driver {{.driver}}:": "正在下载驱动 {{.driver}}:", - "Downloading preloaded images tarball for k8s {{.version}} ...": "", "Downloading {{.name}} {{.version}}": "正在下载 {{.name}} {{.version}}", + "Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps://github.com/kubernetes/minikube/issues/7332": "", "ERROR creating `registry-creds-acr` secret": "", "ERROR creating `registry-creds-dpr` secret": "创建 `registry-creds-dpr` secret 时出错", "ERROR creating `registry-creds-ecr` secret: {{.error}}": "创建 `registry-creds-ecr` secret 时出错:{{.error}}", @@ -138,7 +143,6 @@ "Ensure that the user listed in /etc/libvirt/qemu.conf has access to your home directory": "确保 /etc/libvirt/qemu.conf 中列出的用户具备访问您 home 目录的权限", "Ensure that your value for HTTPS_PROXY points to an HTTPS proxy rather than an HTTP proxy": "确保您配置的 HTTPS_PROXY 指向了 HTTPS 代理而不是 HTTP 代理", "Environment variables to pass to the Docker daemon. 
(format: key=value)": "传递给 Docker 守护进程的环境变量。(格式:键值对)", - "Error adding node to cluster": "", "Error checking driver version: {{.error}}": "检查驱动程序版本时出错:{{.error}}", "Error converting status to json": "转换状态为 json 时出错", "Error creating list template": "创建 list template 时出错", @@ -162,21 +166,17 @@ "Error getting cluster bootstrapper": "获取 cluster bootstrapper 时出错", "Error getting cluster config": "", "Error getting config": "获取 config 时出错", - "Error getting control plane": "", "Error getting host": "获取 host 时出错", - "Error getting host IP": "", "Error getting host status": "获取 host status 时出错", "Error getting machine logs": "获取 machine logs 时出错", "Error getting machine status": "获取 machine status 时出错", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", "Error getting primary control plane": "", - "Error getting primary cp": "", "Error getting profiles to delete": "获取待删除配置文件时出错", "Error getting service status": "获取 service status 时出错", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "使用 namespace: {{.namespace}} 和 labels {{.labelName}}:{{.addonName}} 获取 service 时出错:{{.error}}", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "从虚拟机中获取 host IP 地址时出错", - "Error host driver ip status": "", "Error killing mount process": "杀死 mount 进程时出错", "Error loading api": "加载 api 时出错", "Error loading profile config": "加载配置文件的配置时出错", @@ -187,11 +187,9 @@ "Error parsing minikube version: {{.error}}": "解析 minikube 版本时出错:{{.error}}", "Error reading {{.path}}: {{.error}}": "读取 {{.path}} 时出错:{{.error}}", "Error restarting cluster": "重启 cluster 时出错", - "Error retrieving node": "", "Error setting shell variables": "设置 shell 变量时出错", "Error starting cluster": "开启 cluster 时出错", "Error starting mount": "开启 mount 时出错", - "Error starting node": "", "Error unsetting shell variables": "取消设置 shell 变量时出错", "Error while setting kubectl current context : {{.error}}": "设置 kubectl 上下文时出错 :{{.error}}", "Error writing mount pid": "写入 mount pid 时出错", @@ -199,6 +197,8 @@ "Error: You have selected Kubernetes v{{.new}}, but the existing cluster for your profile is running Kubernetes v{{.old}}. Non-destructive downgrades are not supported, but you can proceed by performing one of the following options:\n* Recreate the cluster using Kubernetes v{{.new}}: Run \"minikube delete {{.profile}}\", then \"minikube start {{.profile}} --kubernetes-version={{.new}}\"\n* Create a second cluster with Kubernetes v{{.new}}: Run \"minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}\"\n* Reuse the existing cluster with Kubernetes v{{.old}} or newer: Run \"minikube start {{.profile}} --kubernetes-version={{.old}}": "错误:您已选择 Kubernetes v{{.new}},但您的配置文件的现有集群正在运行 Kubernetes v{{.old}}。非破坏性降级不受支持,但若要继续操作,您可以执行以下选项之一:\n* 使用 Kubernetes v{{.new}} 重新创建现有集群:运行“minikube delete {{.profile}}”,然后运行“minikube start {{.profile}} --kubernetes-version={{.new}}”\n* 使用 Kubernetes v{{.new}} 再创建一个集群:运行“minikube start -p \u003cnew name\u003e --kubernetes-version={{.new}}”\n* 通过 Kubernetes v{{.old}} 或更高版本重复使用现有集群:运行“minikube start {{.profile}} --kubernetes-version={{.old}}”", "Error: [{{.id}}] {{.error}}": "错误:[{{.id}}] {{.error}}", "Examples": "示例", + "Executing \"{{.command}}\" took an unusually long time: {{.duration}}": "", + "Existing disk is missing new features ({{.error}}). 
To upgrade, run 'minikube delete'": "", "Exiting": "正在退出", "Exiting due to driver incompatibility": "由于驱动程序不兼容而退出", "Exiting.": "正在退出。", @@ -213,14 +213,15 @@ "Failed to change permissions for {{.minikube_dir_path}}: {{.error}}": "未能更改 {{.minikube_dir_path}} 的权限:{{.error}}", "Failed to check if machine exists": "无法检测机器是否存在", "Failed to check main repository and mirrors for images for images": "无法检测主仓库和镜像仓库中的镜像", + "Failed to delete cluster {{.name}}, proceeding with retry anyway.": "", "Failed to delete cluster: {{.error}}": "未能删除集群:{{.error}}", "Failed to delete cluster: {{.error}}__1": "未能删除集群:{{.error}}", "Failed to delete images": "删除镜像时失败", "Failed to delete images from config": "无法删除配置的镜像", - "Failed to delete node {{.name}}": "", "Failed to download kubectl": "下载 kubectl 失败", "Failed to enable container runtime": "", "Failed to generate config": "无法生成配置", + "Failed to get API Server URL": "", "Failed to get bootstrapper": "获取 bootstrapper 失败", "Failed to get command runner": "", "Failed to get driver URL": "获取 driver URL 失败", @@ -236,21 +237,25 @@ "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "未能设置 NO_PROXY 环境变量。请使用“export NO_PROXY=$NO_PROXY,{{.ip}}”。", "Failed to setup certs": "设置 certs 失败", "Failed to setup kubeconfig": "设置 kubeconfig 失败", - "Failed to start node {{.name}}": "", "Failed to stop node {{.name}}": "", "Failed to update cluster": "更新 cluster 失败", "Failed to update config": "更新 config 失败", + "Failed to validate '{{.driver}}' driver": "", "Failed unmount: {{.error}}": "unmount 失败:{{.error}}", "File permissions used for the mount": "用于 mount 的文件权限", + "Filter to use only VM Drivers": "", "Flags": "标志", "Follow": "跟踪", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "为获得最佳结果,请安装 kubectl:https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/__1": "为获得最佳结果,请安装 kubectl:https://kubernetes.io/docs/tasks/tools/install-kubectl/", "For more information, see:": "如需了解详情,请参阅:", + "For more information, see: https://minikube.sigs.k8s.io/docs/reference/drivers/none/": "", "Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh, bash, zsh], default is auto-detect": "强制为指定的 shell 配置环境:[fish, cmd, powershell, tcsh, bash, zsh],默认为 auto-detect", "Force minikube to perform possibly dangerous operations": "强制 minikube 执行可能有风险的操作", "Found network options:": "找到的网络选项:", "Found {{.number}} invalid profile(s) !": "找到 {{.number}} 个无效的配置文件!", + "Generate unable to parse disk size '{{.diskSize}}': {{.error}}": "", + "Generate unable to parse memory '{{.memory}}': {{.error}}": "", "Gets the kubernetes URL(s) for the specified service in your local cluster": "获取本地集群中指定服务的 kubernetes URL", "Gets the kubernetes URL(s) for the specified service in your local cluster. In the case of multiple URLs they will be printed one at a time.": "获取本地集群中指定服务的 kubernetes URL。如果有多个 URL,他们将一次打印一个", "Gets the logs of the running instance, used for debugging minikube, not user code.": "获取正在运行的实例日志,用于调试 minikube,不是用户代码", @@ -270,6 +275,8 @@ "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. Alternatively, you may choose an alternate --driver": "", "Hyperkit networking is broken. Upgrade to the latest hyperkit version and/or Docker for Desktop. 
Alternatively, you may choose an alternate --vm-driver": "Hyperkit 的网络挂了。升级到最新的 hyperkit 版本以及/或者 Docker 桌面版。或者,你可以通过 --vm-driver 切换其他选项", "If set, automatically updates drivers to the latest version. Defaults to true.": "如果设置了,将自动更新驱动到最新版本。默认为 true。", + "If set, delete the current cluster if start fails and try again. Defaults to false.": "", + "If set, download tarball of preloaded images if available to improve start time. Defaults to true.": "", "If set, install addons. Defaults to true.": "", "If set, pause all namespaces": "", "If set, unpause all namespaces": "", @@ -286,12 +293,12 @@ "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.": "传递给 Docker 守护进程的不安全 Docker 注册表。系统会自动添加默认服务 CIDR 范围。", "Install VirtualBox, or select an alternative value for --driver": "", "Install the latest hyperkit binary, and run 'minikube delete'": "", - "Invalid size passed in argument: {{.error}}": "", "IsEnabled failed": "", + "Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs": "", + "Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB": "", "Kill the mount process spawned by minikube start": "", "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.new}}": "Kubernetes {{.new}} 现在可用了。如果您想升级,请指定 --kubernetes-version={{.new}}", "Kubernetes {{.version}} is not supported by this release of minikube": "当前版本的 minukube 不支持 Kubernetes {{.version}}", - "Launching Kubernetes ...": "", "Launching Kubernetes ... ": "正在启动 Kubernetes ... ", "Launching proxy ...": "", "List all available images from the local cache.": "", @@ -303,7 +310,7 @@ "Local folders to share with Guest via NFS mounts (hyperkit driver only)": "通过 NFS 装载与访客共享的本地文件夹(仅限 hyperkit 驱动程序)", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)": "用于网络连接的 VPNKit 套接字的位置。如果为空,则停用 Hyperkit VPNKitSock;如果为“auto”,则将 Docker 用于 Mac VPNKit 连接;否则使用指定的 VSock(仅限 hyperkit 驱动程序)", "Location of the minikube iso": "minikube iso 的位置", - "Location of the minikube iso.": "", + "Locations to fetch the minikube ISO from.": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'": "", "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.": "", "Message Size: {{.size}}": "", @@ -311,6 +318,7 @@ "Minikube is a tool for managing local Kubernetes clusters.": "", "Modify minikube config": "修改 minikube 配置", "Modify minikube's kubernetes addons": "修改 minikube 的 kubernetes 插件", + "Most users should use the newer 'docker' driver instead, which does not require root!": "", "Mount type: {{.name}}": "", "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...": "", "Mounts the specified directory into minikube": "将指定的目录挂载到 minikube", @@ -320,18 +328,23 @@ "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "网卡类型仅用于主机网络。Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM 之一,或 virtio(仅限 VirtualBox 驱动程序)", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "网络和连接命令:", + "No changes required for the \"{{.context}}\" context": "", "No minikube profile was found. 
You can create one using `minikube start`.": "", - "Node may be unable to resolve external DNS records": "", + "Node \"{{.node_name}}\" stopped.": "", "Node operations": "", + "Node {{.name}} failed to start, deleting and trying again.": "", "Node {{.name}} was successfully deleted.": "", + "Node {{.nodeName}} does not exist.": "", + "Non-destructive downgrades are not supported, but you can proceed with one of the following options:\n\n 1) Recreate the cluster with Kubernetes v{{.new}}, by running:\n\n minikube delete{{.profile}}\n minikube start{{.profile}} --kubernetes-version={{.new}}\n\n 2) Create a second cluster with Kubernetes v{{.new}}, by running:\n\n minikube start -p {{.suggestedName}} --kubernetes-version={{.new}}\n\n 3) Use the existing cluster at version Kubernetes v{{.old}}, by running:\n\n minikube start{{.profile}} --kubernetes-version={{.old}}": "", "None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.": "您所在位置的已知存储库都无法访问。正在将 {{.image_repository_name}} 用作后备存储库。", "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag": "已知存储库都无法访问。请考虑使用 --image-repository 标志指定备选镜像存储库", "Not passing {{.name}}={{.value}} to docker env.": "", - "Noticed that you are using minikube docker-env:": "", + "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:": "", + "Number of CPUs allocated to Kubernetes.": "", "Number of CPUs allocated to the minikube VM": "分配给 minikube 虚拟机的 CPU 的数量", - "Number of CPUs allocated to the minikube VM.": "", "Number of lines back to go within the log": "", "OS release is {{.pretty_name}}": "", + "One of 'yaml' or 'json'.": "", "Open the addons URL with https instead of http": "", "Open the service URL with https instead of http": "", "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...": "", @@ -350,19 +363,23 @@ "Please install the minikube hyperkit VM driver, or select an alternative --driver": "", "Please install the minikube kvm2 VM driver, or select an alternative --driver": "", "Please make sure the service you are looking for is deployed or is in the correct namespace.": "", + "Please re-eval your docker-env, To ensure your environment variables have updated ports: \n\n\t'minikube -p {{.profile_name}} docker-env'": "", "Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "", "Please upgrade the '{{.driver_executable}}'. {{.documentation_url}}": "请升级“{{.driver_executable}}”。{{.documentation_url}}", "Populates the specified folder with documentation in markdown about minikube": "", "Powering off \"{{.profile_name}}\" via SSH ...": "正在通过 SSH 关闭“{{.profile_name}}”…", "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...": "正在 {{.runtime}} {{.runtimeVersion}} 中准备 Kubernetes {{.k8sVersion}}…", "Print current and latest version number": "打印当前和最新版本版本", + "Print just the version number.": "", "Print the version of minikube": "打印 minikube 版本", "Print the version of minikube.": "打印 minikube 版本。", "Problems detected in {{.entry}}:": "在 {{.entry}} 中 检测到问题:", "Problems detected in {{.name}}:": "在 {{.name}} 中 检测到问题:", "Profile gets or sets the current minikube profile": "获取或设置当前的 minikube 配置文件", "Profile name \"{{.profilename}}\" is minikube keyword. 
To delete profile use command minikube delete -p \u003cprofile name\u003e": "配置文件名称 \"{{.profilename}}\" 是 minikube 的一个关键字。使用 minikube delete -p \u003cprofile name\u003e 命令 删除配置文件", + "Profile name \"{{.profilename}}\" is reserved keyword. To delete this profile, run: \"{{.cmd}}\"": "", "Provide VM UUID to restore MAC address (hyperkit driver only)": "提供虚拟机 UUID 以恢复 MAC 地址(仅限 hyperkit 驱动程序)", + "Pulling base image ...": "", "Pulling images ...": "拉取镜像 ...", "Reboot to complete VirtualBox installation, verify that VirtualBox is not blocked by your system, and/or use another hypervisor": "重启以完成 VirtualBox 安装,检查 VirtualBox 未被您的操作系统禁用,或者使用其他的管理程序。", "Rebuild libvirt with virt-network support": "", @@ -371,30 +388,41 @@ "Registry mirrors to pass to the Docker daemon": "传递给 Docker 守护进程的注册表镜像", "Reinstall VirtualBox and reboot. Alternatively, try the kvm2 driver: https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/": "", "Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "", + "Related issue: {{.url}}": "", "Related issues:": "相关问题:", "Relaunching Kubernetes using {{.bootstrapper}} ...": "正在使用 {{.bootstrapper}} 重新启动 Kubernetes…", + "Remove the incompatible --docker-opt flag if one was provided": "", "Removed all traces of the \"{{.name}}\" cluster.": "", "Removing {{.directory}} ...": "正在移除 {{.directory}}…", "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}": "请求的 CPU 数量 {{.requested_cpus}} 小于允许的最小值 {{.minimum_cpus}}", "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}": "请求的磁盘大小 {{.requested_size}} 小于最小值 {{.minimum_size}}", "Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default_memorysize}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "请求的内存分配 ({{.memory}}MB) 小于默认内存分配 {{.default_memorysize}}MB。请注意 minikube 可能无法正常运行或可能会意外崩溃。", + "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommended}}MB. Kubernetes may crash unexpectedly.": "", "Requested memory allocation {{.requested_size}} is less than the minimum allowed of {{.minimum_size}}": "请求的内存分配 {{.requested_size}} 小于允许的 {{.minimum_size}} 最小值", + "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB": "", + "Restart Docker": "", + "Restarting existing {{.driver_name}} {{.machine_type}} for \"{{.cluster}}\" ...": "", + "Restarting the {{.name}} service may improve performance.": "", "Retrieve the ssh identity key path of the specified cluster": "检索指定集群的 ssh 密钥路径", "Retrieve the ssh identity key path of the specified cluster.": "检索指定集群的 ssh 密钥路径。", "Retrieves the IP address of the running cluster": "检索正在运行的群集的 IP 地址", "Retrieves the IP address of the running cluster, and writes it to STDOUT.": "", "Retrieves the IP address of the running cluster, checks it\n\t\t\twith IP in kubeconfig, and corrects kubeconfig if incorrect.": "", "Returns the value of PROPERTY_NAME from the minikube config file. 
Can be overwritten at runtime by flags or environmental variables.": "", + "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode.": "", "Run 'kubectl describe pod coredns -n kube-system' and check for a firewall or DNS conflict": "", "Run 'minikube delete' to delete the stale VM, or and ensure that minikube is running as the same user you are issuing this command with": "执行 'minikube delete' 以删除过时的虚拟机,或者确保 minikube 以与您发出此命令的用户相同的用户身份运行", + "Run 'sudo sysctl fs.protected_regular=1', or try a driver which does not require root, such as '--driver=docker'": "", "Run kubectl": "运行 kubectl", "Run minikube from the C: drive.": "", "Run the kubernetes client, download it if necessary. Remember -- after kubectl!\n\nExamples:\nminikube kubectl -- --help\nminikube kubectl -- get pods --namespace kube-system": "", - "Run the minikube command as an Administrator": "", "Run: 'chmod 600 $HOME/.kube/config'": "执行 'chmod 600 $HOME/.kube/config'", + "Run: 'kubectl delete clusterrolebinding kubernetes-dashboard'": "", + "Run: 'sudo mkdir /sys/fs/cgroup/systemd \u0026\u0026 sudo mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd'": "", "Running on localhost (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "Selecting '{{.driver}}' driver from existing profile (alternates: {{.alternates}})": "从现有配置文件中选择 '{{.driver}}' 驱动程序 (可选:{{.alternates}})", "Selecting '{{.driver}}' driver from user configuration (alternates: {{.alternates}})": "从用户配置中选择 {{.driver}}' 驱动程序(可选:{{.alternates}})", + "Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "", "Set failed": "", "Set flag to delete all profiles": "设置标志以删除所有配置文件", "Set this flag to delete the '.minikube' folder from your user directory.": "设置这个标志来删除您用户目录下的 '.minikube' 文件夹。", @@ -409,6 +437,7 @@ "Show only log entries which point to known problems": "", "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.": "", "Skipped switching kubectl context for {{.profile_name}} because --keep-context was set.": "", + "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path": "", "Sorry, Kubernetes {{.version}} is not supported by this release of minikube": "", "Sorry, completion support is not yet implemented for {{.name}}": "", "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config": "抱歉,--extra-config 目前不支持 kubeadm.{{.parameter_name}} 参数", @@ -420,8 +449,10 @@ "Specify the 9p version that the mount should use": "", "Specify the ip that the mount should be setup on": "", "Specify the mount filesystem type (supported types: 9p)": "", - "Starting existing {{.driver_name}} VM for \"{{.profile_name}}\" ...": "", - "Starting node": "", + "Start failed after cluster deletion": "", + "StartHost failed, but will try again: {{.error}}": "", + "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting node {{.name}} in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", "Starts a local kubernetes cluster": "启动本地 kubernetes 集群", "Starts a node.": "", @@ -440,13 +471,14 @@ "Suggestion: {{.advice}}": "建议:{{.advice}}", "Suggestion: {{.fix}}": "建议:{{.fix}}", "Target directory {{.path}} must be an absolute path": "", - "The \"{{.driver_name}}\" 
driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube --vm-driver={{.driver_name}}": "“{{.driver_name}}”驱动程序需要根权限。请使用“sudo minikube --vm-driver={{.driver_name}}”运行 minikube", + "The \"{{.driver_name}}\" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.": "", "The \"{{.driver_name}}\" driver should not be used with root privileges.": "", "The \"{{.name}}\" cluster has been deleted.": "“{{.name}}”集群已删除。", "The \"{{.name}}\" cluster has been deleted.__1": "“{{.name}}”集群已删除。", "The 'none' driver does not respect the --cpus flag": "'none' 驱动程序不遵循 --cpus 标志", "The 'none' driver does not respect the --memory flag": "'none' 驱动程序不遵循 --memory 标志", + "The 'none' driver is designed for experts who need to integrate with an existing VM": "", "The 'none' driver provides limited isolation and may reduce system security and reliability.": "“none”驱动程序提供有限的隔离功能,并且可能会降低系统安全性和可靠性。", "The '{{.addonName}}' addon is enabled": "启动 '{{.addonName}}' 插件", "The '{{.driver}}' driver requires elevated permissions. The following commands will be executed:\\n\\n{{ .example }}\\n": "'{{.driver}}' 驱动程序需要提升权限,将执行以下命令:\\n\\n{{ .example }}\\n", @@ -462,19 +494,23 @@ "The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "", "The VM that minikube is configured for no longer exists. Run 'minikube delete'": "", "The apiserver listening port": "apiserver 侦听端口", - "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "", "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "在为 kubernetes 生成的证书中使用的 apiserver 名称。如果您希望将此 apiserver 设置为可从机器外部访问,则可以使用这组 apiserver 名称", "The argument to pass the minikube mount command on start": "用于在启动时传递 minikube 装载命令的参数", "The argument to pass the minikube mount command on start.": "", + "The authoritative apiserver hostname for apiserver certificates and connectivity. 
This can be used if you want to make the apiserver available from outside the machine": "", "The cluster dns domain name used in the kubernetes cluster": "kubernetes 集群中使用的集群 dns 域名", "The container runtime to be used (docker, crio, containerd)": "需要使用的容器运行时(docker、crio、containerd)", "The container runtime to be used (docker, crio, containerd).": "", + "The control plane for \"{{.name}}\" is paused!": "", + "The control plane node \"{{.name}}\" does not exist.": "", + "The control plane node is not running (state={{.state}})": "", + "The control plane node must be running for this command": "", "The cri socket path to be used": "需要使用的 cri 套接字路径", "The cri socket path to be used.": "", - "The docker service within '{{.profile}}' is not active": "", + "The docker service within '{{.name}}' is not active": "", + "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "", "The driver '{{.driver}}' is not supported on {{.os}}": "{{.os}} 不支持驱动程序“{{.driver}}”", - "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}": "", - "The existing \"{{.profile_name}}\" VM that was created using the \"{{.old_driver}}\" driver, and is incompatible with the \"{{.driver}}\" driver.": "", + "The existing \"{{.name}}\" VM was created using the \"{{.old}}\" driver, and is incompatible with the \"{{.new}}\" driver.": "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 虚拟交换机名称。默认为找到的第一个 hyperv 虚拟交换机。(仅限 hyperv 驱动程序)", "The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "管理程序似乎配置的不正确。执行 'minikube start --alsologtostderr -v=1' 并且检查错误代码", "The initial time interval for each check that wait performs in seconds": "", @@ -486,10 +522,13 @@ "The name of the node to delete": "", "The name of the node to start": "", "The node to get logs from. Defaults to the primary control plane.": "", + "The node to ssh into. Defaults to the primary control plane.": "", + "The none driver is not compatible with multi-node clusters.": "", "The number of bytes to use for 9p packet payload": "", + "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "输出的格式。'json' 或者 'table'", "The path on the file system where the docs in markdown need to be saved": "", - "The podman service within '{{.profile}}' is not active": "", + "The podman service within '{{.cluster}}' is not active": "", "The service namespace": "", "The service {{.service}} requires privileged ports to be exposed: {{.ports}}": "", "The services namespace": "", @@ -498,46 +537,65 @@ "The value passed to --format is invalid: {{.error}}": "", "The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\tSee https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information.\n\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "", "The {{.driver_name}} driver should not be used with root privileges.": "不应以根权限使用 {{.driver_name}} 驱动程序。", + "There is no local cluster named \"{{.cluster}}\"": "", "There's a new version for '{{.driver_executable}}'. Please consider upgrading. 
{{.documentation_url}}": "“{{.driver_executable}}”有一个新版本。请考虑升级。{{.documentation_url}}", "These changes will take effect upon a minikube delete and then a minikube start": "", "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "此操作还可通过设置环境变量 CHANGE_MINIKUBE_NONE_USER=true 自动完成", + "This control plane is not running! (state={{.state}})": "", + "This driver does not yet work on your architecture. Maybe try --driver=none": "", + "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "这将保留现有 kubectl 上下文并创建 minikube 上下文。", "This will start the mount daemon and automatically mount files into minikube": "这将启动装载守护进程并将文件自动装载到 minikube 中", "This will start the mount daemon and automatically mount files into minikube.": "", + "This {{.type}} is having trouble accessing https://{{.repository}}": "", + "Tip: To remove this root owned cluster, run: sudo {{.cmd}}": "", "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete": "提示:要移除这个由根用户拥有的集群,请运行 sudo {{.cmd}} delete", "To connect to this cluster, use: kubectl --context={{.name}}": "如需连接到此集群,请使用 kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.name}}__1": "如需连接到此集群,请使用 kubectl --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "", "To disable this notice, run: 'minikube config set WantUpdateNotification false'\\n": "", - "To proceed, either:\n\n 1) Delete the existing \"{{.profile_name}}\" cluster using: '{{.command}} delete'\n\n * or *\n\n 2) Start the existing \"{{.profile_name}}\" cluster using: '{{.command}} start --driver={{.old_driver}}'": "", + "To fix this, run: \"{{.command}}\"": "", + "To proceed, either:\n\n1) Delete the existing \"{{.name}}\" cluster using: '{{.delcommand}}'\n\n* or *\n\n2) Start the existing \"{{.name}}\" cluster using: '{{.command}} --driver={{.old}}'": "", + "To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/": "", "To see addons list for other profiles use: `minikube addons -p name list`": "", - "To start minikube with HyperV Powershell must be in your PATH`": "", + "To start minikube with Hyper-V, Powershell must be in your PATH`": "", "To use kubectl or minikube commands as your own user, you may need to relocate them. For example, to overwrite your own settings, run:": "如需以您自己的用户身份使用 kubectl 或 minikube 命令,您可能需要重新定位该命令。例如,如需覆盖您的自定义设置,请运行:", "Troubleshooting Commands:": "故障排除命令ƒ", + "Try 'minikube delete' to force new SSL certificates to be installed": "", + "Try 'minikube delete', and disable any conflicting VPN or firewall software": "", + "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Trying to delete invalid profile {{.profile}}": "尝试删除无效的配置文件 {{.profile}}", "Unable to bind flags": "无法绑定标志", - "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "Unable to determine a default driver to use. 
Try specifying --vm-driver, or see https://minikube.sigs.k8s.io/docs/start/": "无法确定要使用的默认驱动。尝试通过 --vm-dirver 指定,或者查阅 https://minikube.sigs.k8s.io/docs/start/", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", + "Unable to find control plane": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get VM IP address": "", "Unable to get addon status for {{.name}}: {{.error}}": "", "Unable to get bootstrapper: {{.error}}": "无法获取引导程序:{{.error}}", + "Unable to get command runner": "", + "Unable to get control plane status: {{.error}}": "", "Unable to get current user": "", + "Unable to get forwarded endpoint": "", + "Unable to get machine status": "", "Unable to get runtime": "", "Unable to get the status of the {{.name}} cluster.": "无法获取 {{.name}} 集群状态。", "Unable to kill mount process: {{.error}}": "", "Unable to load cached images from config file.": "无法从配置文件中加载缓存的镜像。", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "无法加载配置:{{.error}}", + "Unable to load host": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "无法解析“{{.kubernetes_version}}”:{{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "无法从常量中解析默认的 Kubernetes 版本号: {{.error}}", + "Unable to parse memory '{{.memory}}': {{.error}}": "", "Unable to parse oldest Kubernetes version from constants: {{.error}}": "无法从常量中解析最旧的 Kubernetes 版本号: {{.error}}", + "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to pull images, which may be OK: {{.error}}": "无法拉取镜像,有可能是正常状况:{{.error}}", - "Unable to remove machine directory: %v": "", + "Unable to remove machine directory": "", + "Unable to restart cluster, will reset it: {{.error}}": "", "Unable to start VM. Please investigate and run 'minikube delete' if possible": "无法启动虚拟机。可能的话请检查后执行 'minikube delete'", "Unable to stop VM": "无法停止虚拟机", "Unable to update {{.driver}} driver: {{.error}}": "", @@ -549,6 +607,7 @@ "Unset the KUBECONFIG environment variable, or verify that it does not point to an empty or otherwise invalid path": "", "Unset variables instead of setting them": "", "Update server returned an empty list": "", + "Updating the running {{.driver_name}} \"{{.cluster}}\" {{.machine_type}} ...": "", "Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "", "Upgrading from Kubernetes {{.old}} to {{.new}}": "正在从 Kubernetes {{.old}} 升级到 {{.new}}", "Usage": "使用方法", @@ -575,6 +634,7 @@ "VM driver is one of: %v": "虚拟机驱动程序是以下项之一:%v", "VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository": "虚拟机无权访问 {{.repository}},或许您需要配置代理或者设置 --image-repository", "VM may be unable to resolve external DNS records": "虚拟机可能无法解析外部 DNS 记录", + "Validation unable to parse disk size '{{.diskSize}}': {{.error}}": "", "Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "验证是否正确设置了 HTTP_PROXY 和 HTTPS_PROXY 环境变量。", "Verify the IP address of the running cluster in kubeconfig.": "在 kubeconfig 中验证正在运行的集群 IP 地址。", "Verifying dashboard health ...": "正在验证 dashboard 运行情况 ...", @@ -586,9 +646,8 @@ "VirtualBox is broken. Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "", "VirtualBox is broken. 
Reinstall VirtualBox, reboot, and run 'minikube delete'.": "", "VirtualBox is unable to find its network interface. Try upgrading to the latest release and rebooting.": "VirtualBox 无法找到他的网络接口。尝试升级到最新版本并重启。", - "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", + "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--driver=docker'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "", "Virtualization support is disabled on your computer. If you are running minikube within a VM, try '--vm-driver=none'. Otherwise, consult your systems BIOS manual for how to enable virtualization.": "您的计算机禁用了虚拟化支持。如果您正在虚拟机内运行 minikube, 尝试 '--vm-driver=none'。否则,请参阅系统BIOS手册了解如何启用虚拟化。", - "Wait failed": "", "Wait failed: {{.error}}": "", "Wait until Kubernetes core services are healthy before exiting": "等到 Kubernetes 核心服务正常运行再退出", "Waiting for cluster to come online ...": "等待集群上线...", @@ -597,55 +656,58 @@ "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)": "NFS 共享的根目录位置,默认为 /nfsshares(仅限 hyperkit 驱动程序)", "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)": "", "You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details": "您似乎正在使用代理,但您的 NO_PROXY 环境不包含 minikube IP ({{.ip_address}})。如需了解详情,请参阅 {{.documentation_url}}", + "You can also use 'minikube kubectl -- get pods' to invoke a matching version": "", "You can delete them using the following command(s):": "", + "You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.": "", + "You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.": "", + "You have selected Kubernetes v{{.new}}, but the existing cluster is running Kubernetes v{{.old}}": "", "You may need to manually remove the \"{{.name}}\" VM from your hypervisor": "您可能需要从管理程序中手动移除“{{.name}}”虚拟机", "You may need to stop the Hyper-V Manager and run `minikube delete` again.": "", "You must specify a service name": "", "Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "", - "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=none'. Otherwise, enable virtualization in your BIOS": "", + "Your host does not support virtualization. If you are running minikube within a VM, try '--driver=docker'. Otherwise, enable virtualization in your BIOS": "", "Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "", "Your minikube config refers to an unsupported driver. 
Erase ~/.minikube, and try again.": "", "Your minikube vm is not running, try minikube start.": "", + "[{{.id}}] {{.msg}} {{.error}}": "", + "adding node": "", "addon '{{.name}}' is currently not enabled.\nTo enable this addon run:\nminikube addons enable {{.name}}": "", "addon '{{.name}}' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "", "addon enable failed": "启用插件失败", "addons modifies minikube addons files using subcommands like \"minikube addons enable dashboard\"": "插件使用诸如 \"minikube addons enable dashboard\" 的子命令修改 minikube 的插件文件", - "api load": "", "bash completion failed": "", "call with cleanup=true to remove old tunnels": "", - "command runner": "", "config modifies minikube config files using subcommands like \"minikube config set driver kvm\"\nConfigurable fields:\\n\\n": "", "config view failed": "", - "creating api client": "", "dashboard service is not running: {{.error}}": "", + "deleting node": "", "disable failed": "禁用失败", "dry-run mode. Validates configuration, but does not mutate system state": "", "dry-run validation complete!": "", "enable failed": "开启失败", "error creating clientset": "", - "error creating machine client": "", "error getting primary control plane": "", "error getting ssh port": "", "error parsing the input ip address for mount": "", "error starting tunnel": "", "error stopping tunnel": "", + "error: --output must be 'yaml' or 'json'": "", "failed to open browser: {{.error}}": "", - "getting config": "", - "getting primary control plane": "", "if true, will embed the certs in kubeconfig.": "", "if you want to create a profile you can by this command: minikube start -p {{.profile_name}}": "", + "initialization failed, will try again: {{.error}}": "", "kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p\u003cport\u003e to find the process and kill it": "kubeadm 检测一个到与其他进程的 TCP 端口冲突:或许是另外的本地安装的 Kubernetes 导致。执行 lsof -p\u003cport\u003e 查找并杀死这些进程", "kubectl and minikube configuration will be stored in {{.home_folder}}": "kubectl 和 minikube 配置将存储在 {{.home_folder}} 中", - "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "", "kubectl proxy": "", - "loading config": "", + "libmachine failed": "", "logdir set failed": "", - "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.": "", "max time to wait per Kubernetes core services to be healthy.": "每个 Kubernetes 核心服务保持健康所需的最长时间。", "minikube addons list --output OUTPUT. json, list": "", "minikube is exiting due to an error. If the above message is not useful, open an issue:": "由于出错 minikube 正在退出。如果以上信息没有帮助,请提交问题反馈:", + "minikube is not yet compatible with ChromeOS": "", "minikube is unable to access the Google Container Registry. 
You may need to configure it to use a HTTP proxy.": "", - "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --driver\n\t- Use --force to override this connectivity check": "", + "minikube is unable to connect to the VM: {{.error}}\n\n\tThis is likely due to one of two reasons:\n\n\t- VPN or firewall interference\n\t- {{.hypervisor}} network configuration issue\n\n\tSuggested workarounds:\n\n\t- Disable your local VPN or firewall software\n\t- Configure your local VPN or firewall to allow access to {{.ip}}\n\t- Restart or reinstall {{.hypervisor}}\n\t- Use an alternative --vm-driver\n\t- Use --force to override this connectivity check": "", "minikube is unable to connect to the VM: {{.error}}\n\nThis is likely due to one of two reasons:\n\n- VPN or firewall interference\n- {{.hypervisor}} network configuration issue\n\nSuggested workarounds:\n\n- Disable your local VPN or firewall software\n- Configure your local VPN or firewall to allow access to {{.ip}}\n- Restart or reinstall {{.hypervisor}}\n- Use an alternative --vm-driver": "minikube 无法连接到虚拟机:{{.error}}\n\n可能是由于以下两个原因之一导致:\n\n-VPN 或防火墙冲突\n- {{.hypervisor}} 网络配置问题\n建议的方案:\n\n- 禁用本地的 VPN 或者防火墙软件\n- 配置本地 VPN 或防火墙软件,放行 {{.ip}}\n- 重启或者重装 {{.hypervisor}}\n- 使用另外的 --vm-driver", "minikube profile was successfully set to {{.profile_name}}": "", "minikube status --output OUTPUT. json, text": "", @@ -655,14 +717,16 @@ "mount failed": "", "namespaces to pause": "", "namespaces to unpause": "", + "none driver does not support multi-node clusters": "", "not enough arguments ({{.ArgCount}}).\\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "", "pause containers": "暂停容器", "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`": "", - "profile {{.name}} is not running.": "", "reload cached images.": "重新加载缓存的镜像", "reloads images previously added using the 'cache add' subcommand": "重新加载之前通过子命令 'cache add' 添加的镜像", "retrieving node": "", + "saving node": "", "service {{.namespace_name}}/{{.service_name}} has no node port": "", + "startup failed": "", "stat failed": "", "status json failure": "", "status text failure": "", @@ -686,17 +750,19 @@ "usage: minikube config unset PROPERTY_NAME": "", "usage: minikube delete": "", "usage: minikube profile [MINIKUBE_PROFILE_NAME]": "", + "version json failure": "", + "version yaml failure": "", "zsh completion failed": "", + "{{ .name }}: {{ .rejection }}": "", + "{{.driver_name}} \"{{.cluster}}\" {{.machine_type}} is missing, will recreate.": "", "{{.driver}} does not appear to be installed": "似乎并未安装 {{.driver}}", "{{.driver}} does not appear to be installed, but is specified by an existing profile. 
Please run 'minikube delete' or install {{.driver}}": "似乎并未安装 {{.driver}},但已被当前的配置文件指定。请执行 'minikube delete' 或者安装 {{.driver}}", "{{.extra_option_component_name}}.{{.key}}={{.value}}": "", - "{{.machine}} IP has been updated to point at {{.ip}}": "", - "{{.machine}} IP was already correctly configured for {{.ip}}": "", - "{{.name}} cluster does not exist": "", "{{.name}} has no available configuration options": "", "{{.name}} is already running": "", "{{.name}} was successfully configured": "", "{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster": "{{.path}} 的版本是 {{.client_version}},且与 Kubernetes {{.cluster_version}} 不兼容。您需要更新 {{.path}} 或者使用 'minikube kubectl' 连接到这个集群", + "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.": "", "{{.prefix}}minikube {{.version}} on {{.platform}}": "{{.platform}} 上的 {{.prefix}}minikube {{.version}}", "{{.type}} is not yet a supported filesystem. We will try anyways!": "", "{{.url}} is not accessible: {{.error}}": ""