commit c6a682e4d5
@@ -2,21 +2,24 @@
name: English
about: Report an issue
---

<!-- Please use this template while reporting an issue, providing as much information as possible. Failure to do so may result in a delayed response. Thank you! -->

<!--- Please include the "minikube start" command you used in your reproduction steps --->

**Steps to reproduce the issue:**

**The exact command to reproduce the issue**:

1.
2.
3.

<!--- TIP: Add the "--alsologtostderr" flag to the command-line for more logs --->

**Full output of failed command:**

**The full output of the command that failed**:<details>

**Full output of `minikube start` command used, if not already included:**

**Optional: Full output of `minikube logs` command:**

<details>

</details>

**The output of the `minikube logs` command**:<details>

</details>

**The operating system version**:

@@ -19,6 +19,8 @@ jobs:
run : |
make minikube-linux-amd64
make e2e-linux-amd64
make minikube-windows-amd64.exe
make e2e-windows-amd64.exe
cp -r test/integration/testdata ./out
whoami
echo github ref $GITHUB_REF

@@ -81,24 +83,43 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
docker info || true
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: Run Integration Test
continue-on-error: true
continue-on-error: false
# bash {0} to allow test to continue to next step. in case of
shell: bash {0}
run: |
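A note on the `shell: bash {0}` override above (the in-file comment is cut off in this view): the GitHub-provided bash template normally adds fail-fast options, so a failing e2e binary would abort the step before any log collection or report generation ran. A standalone illustration of the difference, not part of the workflow itself:

```bash
# With fail-fast options, the first failing command aborts the script,
# so nothing after it runs:
bash -eo pipefail -c 'false; echo "would still generate report"'   # prints nothing, exits 1
# Without them -- which is what `shell: bash {0}` gives this step -- execution continues:
bash -c 'false; echo "would still generate report"'                # prints the message
```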

@@ -108,7 +129,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))

@@ -140,6 +161,8 @@ jobs:
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
docker_ubuntu_18_04:
runs-on: ubuntu-18.04

@@ -150,16 +173,35 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
docker info || true
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1

@@ -176,7 +218,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))

@@ -208,7 +250,73 @@ jobs:
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
docker_on_windows:
needs: [build_minikube]
env:
TIME_ELAPSED: time
JOB_NAME: "Docker_on_windows"
COMMIT_STATUS: ""
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Docker Info
shell: bash
run: |
docker info || true
docker version || true
docker ps || true
- name: Download gopogh
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh.exe
shell: bash
- name: Download binaries
uses: actions/download-artifact@v1
with:
name: minikube_binaries
- name: run integration test
continue-on-error: true
run: |
set +euo pipefail
mkdir -p report
mkdir -p testhome
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome minikube_binaries/e2e-windows-amd64.exe -minikube-start-args=--vm-driver=docker -binary=minikube_binaries/minikube-windows-amd64.exe -test.v -test.timeout=65m 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))
sec=$((${TIME_ELAPSED}%60))
TIME_ELAPSED="${min} min $sec seconds"
echo ::set-env name=TIME_ELAPSED::${TIME_ELAPSED}
shell: bash
- name: Generate html report
run: |
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
STAT=$(${GITHUB_WORKSPACE}/gopogh.exe -in ./report/testout.json -out ./report/testout.html -name " $GITHUB_REF" -repo "${JOB_NAME} ${GITHUB_REF} ${GITHUB_REPOSITORY}" -details "${GITHUB_SHA}") || true
echo status: ${STAT}
FailNum=$(echo $STAT | jq '.NumberOfFail')
TestsNum=$(echo $STAT | jq '.NumberOfTests')
GOPOGH_RESULT="${JOB_NAME} : completed with ${FailNum} / ${TestsNum} failures in ${TIME_ELAPSED}"
echo ::set-env name=GOPOGH_RESULT::${GOPOGH_RESULT}
echo ::set-env name=STAT::${STAT}
shell: bash
- uses: actions/upload-artifact@v1
with:
name: docker_on_windows
path: report
- name: The End Result
run: |
echo ${GOPOGH_RESULT}
numFail=$(echo $STAT | jq '.NumberOfFail')
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "--------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
shell: bash
none_ubuntu16_04:
needs: [build_minikube]
env:

@@ -218,10 +326,27 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1

@@ -238,7 +363,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))

@@ -270,6 +395,8 @@ jobs:
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
none_ubuntu18_04:
needs: [build_minikube]

@@ -280,10 +407,27 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
VERSION="v1.17.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download Binaries
uses: actions/download-artifact@v1

@@ -300,7 +444,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))

@@ -332,8 +476,10 @@ jobs:
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
podman_ubuntu_18_04:
podman_ubuntu_18_04_experimental:
needs: [build_minikube]
env:
TIME_ELAPSED: time

@@ -342,7 +488,13 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: install podman
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install podman
shell: bash
run: |
. /etc/os-release

@@ -356,7 +508,7 @@ jobs:
- name: Install gopogh
shell: bash
run: |
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.16/gopogh-linux-amd64
curl -LO https://github.com/medyagh/gopogh/releases/download/v0.1.18/gopogh-linux-amd64
sudo install gopogh-linux-amd64 /usr/local/bin/gopogh
- name: Download binaries
uses: actions/download-artifact@v1

@@ -373,7 +525,7 @@ jobs:
chmod a+x e2e-*
chmod a+x minikube-*
START_TIME=$(date -u +%s)
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=30m -test.v -timeout-multiplier=1 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
END_TIME=$(date -u +%s)
TIME_ELAPSED=$(($END_TIME-$START_TIME))
min=$((${TIME_ELAPSED}/60))

@@ -405,12 +557,14 @@ jobs:
echo "----------------${numFail} Failures----------------------------"
echo $STAT | jq '.FailedTests' || true
echo "-------------------------------------------------------"
numPass=$(echo $STAT | jq '.NumberOfPass')
echo "*** $numPass Passed ***"
if [ "$numFail" -gt 0 ];then echo "*** $numFail Failed ***";exit 2;fi
# After all 4 integration tests finished
# collect all the reports and upload
upload_all_reports:
if: always()
needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04]
needs: [docker_ubuntu_16_04,docker_ubuntu_18_04,none_ubuntu16_04,none_ubuntu18_04,podman_ubuntu_18_04_experimental]
runs-on: ubuntu-18.04
steps:
- name: Download Results docker_ubuntu_16_04

@@ -433,6 +587,15 @@ jobs:
run: |
mkdir -p all_reports
cp -r docker_ubuntu_18_04 ./all_reports/
- name: download results docker_on_windows
uses: actions/download-artifact@v1
with:
name: docker_on_windows
- name: cp to all_report
shell: bash
run: |
mkdir -p all_reports
cp -r docker_on_windows ./all_reports/
- name: Download Results none_ubuntu16_04
uses: actions/download-artifact@v1
with:
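Every integration job above ends with the same reporting recipe: convert the verbose `go test` output to JSON, feed it to gopogh for an HTML report plus a JSON summary on stdout, then pull pass/fail counts out with jq. A rough local equivalent, assuming gopogh is on PATH and `./report/testout.txt` holds the test output (the file name `status.json` is illustrative):

```bash
go tool test2json -t < ./report/testout.txt > ./report/testout.json || true
gopogh -in ./report/testout.json -out ./report/testout.html \
  -name "local run" -repo "kubernetes/minikube" -details "$(git rev-parse HEAD)" > status.json || true
echo "failed: $(jq '.NumberOfFail' status.json) / $(jq '.NumberOfTests' status.json)"
jq '.FailedTests' status.json
```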
CHANGELOG.md

@@ -1,5 +1,170 @@
# Release Notes

## Version 1.9.2 - 2020-04-04

Minor improvements:

* UX: Remove noisy debug statement [#7407](https://github.com/kubernetes/minikube/pull/7407)
* Feature: Make --wait more flexible [#7375](https://github.com/kubernetes/minikube/pull/7375)
* Docker: adjust warn if slow for ps and volume [#7410](https://github.com/kubernetes/minikube/pull/7410)
* Localization: Update Japanese translations [#7403](https://github.com/kubernetes/minikube/pull/7403)
* Performance: Parallelize updating cluster and setting up certs [#7394](https://github.com/kubernetes/minikube/pull/7394)
* Addons: allow ingress addon for docker/podman drivers only on linux for now [#7393](https://github.com/kubernetes/minikube/pull/7393)

- Anders F Björklund
- Medya Ghazizadeh
- Prasad Katti
- Priya Wadhwa
- Thomas Strömberg
- tomocy

## Version 1.9.1 - 2020-04-02

Improvements:

* add delete-on-failure flag [#7345](https://github.com/kubernetes/minikube/pull/7345)
* Run dashboard with internal kubectl if not in path [#7299](https://github.com/kubernetes/minikube/pull/7299)
* Implement options for the minikube version command [#7325](https://github.com/kubernetes/minikube/pull/7325)
* service list cmd: display target port and name [#6879](https://github.com/kubernetes/minikube/pull/6879)
* Add rejection reason to 'unable to find driver' error [#7379](https://github.com/kubernetes/minikube/pull/7379)
* Update Japanese translations [#7359](https://github.com/kubernetes/minikube/pull/7359)

Bug fixes:

* Make eviction and image GC settings consistent across kubeadm API versions [#7364](https://github.com/kubernetes/minikube/pull/7364)
* Move errors and warnings to output to stderr [#7382](https://github.com/kubernetes/minikube/pull/7382)
* Correct assumptions for forwarded hostname & IP handling [#7360](https://github.com/kubernetes/minikube/pull/7360)
* Extend maximum stop retry from 30s to 120s [#7363](https://github.com/kubernetes/minikube/pull/7363)
* Use kubectl version --short if --output=json fails [#7356](https://github.com/kubernetes/minikube/pull/7356)
* Fix embed certs by updating kubeconfig after certs are populated [#7309](https://github.com/kubernetes/minikube/pull/7309)
* none: Use LookPath to verify conntrack install [#7305](https://github.com/kubernetes/minikube/pull/7305)
* Show all global flags in options command [#7292](https://github.com/kubernetes/minikube/pull/7292)
* Fix null deref in start host err [#7278](https://github.com/kubernetes/minikube/pull/7278)
* Increase Docker "slow" timeouts to 15s [#7268](https://github.com/kubernetes/minikube/pull/7268)
* none: check for docker and root uid [#7388](https://github.com/kubernetes/minikube/pull/7388)

Thank you to our contributors for this release!

- Anders F Björklund
- Dan Lorenc
- Eberhard Wolff
- John Laswell
- Marcin Niemira
- Medya Ghazizadeh
- Prasad Katti
- Priya Wadhwa
- Sharif Elgamal
- Thomas Strömberg
- Vincent Link
- anencore94
- priyawadhwa
- re;i
- tomocy

## Version 1.9.0 - 2020-03-26

New features & improvements

* Update DefaultKubernetesVersion to v1.18.0 [#7235](https://github.com/kubernetes/minikube/pull/7235)
* Add --vm flag for users who want to autoselect only VM's [#7068](https://github.com/kubernetes/minikube/pull/7068)
* Add 'stable' and 'latest' as valid kubernetes-version values [#7212](https://github.com/kubernetes/minikube/pull/7212)

* gpu addon: privileged mode no longer required [#7149](https://github.com/kubernetes/minikube/pull/7149)
* Add sch_tbf and extend filter ipset kernel module for bandwidth shaping [#7255](https://github.com/kubernetes/minikube/pull/7255)
* Parse --disk-size and --memory sizes with binary suffixes [#7206](https://github.com/kubernetes/minikube/pull/7206)

Bug Fixes

* Re-initalize failed Kubernetes clusters [#7234](https://github.com/kubernetes/minikube/pull/7234)
* do not override hostname if extraConfig is specified [#7238](https://github.com/kubernetes/minikube/pull/7238)
* Enable HW_RANDOM_VIRTIO to fix sshd startup delays [#7208](https://github.com/kubernetes/minikube/pull/7208)
* hyperv Delete: call StopHost before removing VM [#7160](https://github.com/kubernetes/minikube/pull/7160)

Huge thank you for this release towards our contributors:

- Anders F Björklund
- Medya Ghazizadeh
- Priya Wadhwa
- Sharif Elgamal
- Thomas Strömberg
- Tom
- Vincent Link
- Yang Keao
- Zhongcheng Lao
- vikkyomkar

## Version 1.9.0-beta.2 - 2020-03-21

New features & improvements

* 🎉 Experimental multi-node support 🎊 [#6787](https://github.com/kubernetes/minikube/pull/6787)
* Add kubectl desc nodes to minikube logs [#7105](https://github.com/kubernetes/minikube/pull/7105)
* bumpup helm-tiller v2.16.1 → v2.16.3 [#7130](https://github.com/kubernetes/minikube/pull/7130)
* Update Nvidia GPU plugin [#7132](https://github.com/kubernetes/minikube/pull/7132)
* bumpup istio & istio-provisoner addon 1.4.0 → 1.5.0 [#7120](https://github.com/kubernetes/minikube/pull/7120)
* New addon: registry-aliases [#6657](https://github.com/kubernetes/minikube/pull/6657)
* Upgrade buildroot minor version [#7101](https://github.com/kubernetes/minikube/pull/7101)
* Skip kubeadm if cluster is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124)
* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125)

Bugfixes

* Prevent minikube from crashing if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844)
* Add warning if both vm-driver and driver are specified [#7109](https://github.com/kubernetes/minikube/pull/7109)
* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112)
* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115)

Thank you to our contributors:

- Anders F Björklund
- Iso Kenta
- Kamesh Sampath
- Kenta Iso
- Prasad Katti
- Priya Wadhwa
- Sharif Elgamal
- Tacio Costa
- Thomas Strömberg
- Zhongcheng Lao
- rajula96reddy
- sayboras

## Version 1.9.0-beta.1 - 2020-03-18

New features

* Use Kubernetes v1.18.0-rc.1 by default [#7076](https://github.com/kubernetes/minikube/pull/7076)
* Upgrade Docker driver to preferred (Linux), default on other platforms [#7090](https://github.com/kubernetes/minikube/pull/7090)
* Upgrade Docker, from 19.03.7 to 19.03.8 [#7040](https://github.com/kubernetes/minikube/pull/7040)
* Upgrade Docker, from 19.03.6 to 19.03.7 [#6939](https://github.com/kubernetes/minikube/pull/6939)
* Upgrade dashboard to v2.0.0-rc6 [#7098](https://github.com/kubernetes/minikube/pull/7098)
* Upgrade crio to 1.17.1 [#7099](https://github.com/kubernetes/minikube/pull/7099)
* Updated French translation [#7055](https://github.com/kubernetes/minikube/pull/7055)

Bugfixes

* If user doesn't specify driver, don't validate against existing cluster [#7096](https://github.com/kubernetes/minikube/pull/7096)
* Strip the version prefix before calling semver [#7054](https://github.com/kubernetes/minikube/pull/7054)
* Move some of the driver validation before driver selection [#7080](https://github.com/kubernetes/minikube/pull/7080)
* Fix bug where global config memory was ignored [#7082](https://github.com/kubernetes/minikube/pull/7082)
* Remove controllerManager from the kubeadm v1beta2 template [#7030](https://github.com/kubernetes/minikube/pull/7030)
* Delete: output underlying status failure [#7043](https://github.com/kubernetes/minikube/pull/7043)
* status: error properly if cluster does not exist [#7041](https://github.com/kubernetes/minikube/pull/7041)

Huge thank you for this release towards our contributors:

- Anders F Björklund
- Medya Ghazizadeh
- Priya Wadhwa
- RA489
- Richard Wall
- Sharif Elgamal
- Thomas Strömberg
- Vikky Omkar
- jumahmohammad

## Version 1.8.2 - 2020-03-13

Shiny new improvements:
Makefile

@@ -14,15 +14,13 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 8
VERSION_MINOR ?= 9
VERSION_BUILD ?= 2
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD}
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)

KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/constants/constants.go | cut -d \" -f2)
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
PRELOADED_TARBALL_VERSION ?= $(shell egrep "PreloadVersion =" pkg/minikube/download/preload.go | cut -d \" -f2)
PRELOADED_VOLUMES_GCS_BUCKET ?= $(shell egrep "PreloadBucket =" pkg/minikube/download/preload.go | cut -d \" -f2)

# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0

@@ -34,7 +32,7 @@ RPM_VERSION ?= $(DEB_VERSION)
GO_VERSION ?= 1.13.8

INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
BUILDROOT_BRANCH ?= 2019.02.9
BUILDROOT_BRANCH ?= 2019.02.10
REGISTRY?=gcr.io/k8s-minikube

# Get git commit id

@@ -54,7 +52,7 @@ MINIKUBE_BUCKET ?= minikube/releases
MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download

KERNEL_VERSION ?= 4.19.94
KERNEL_VERSION ?= 4.19.107
# latest from https://github.com/golangci/golangci-lint/releases
GOLINT_VERSION ?= v1.23.6
# Limit number of default jobs, to avoid the CI builds running out of memory

@@ -113,7 +111,7 @@ MINIKUBE_TEST_FILES := ./cmd/... ./pkg/...
MARKDOWNLINT ?= markdownlint

MINIKUBE_MARKDOWN_FILES := README.md docs CONTRIBUTING.md CHANGELOG.md
MINIKUBE_MARKDOWN_FILES := README.md CONTRIBUTING.md CHANGELOG.md

MINIKUBE_BUILD_TAGS := container_image_ostree_stub containers_image_openpgp
MINIKUBE_BUILD_TAGS += go_getter_nos3 go_getter_nogcs

@@ -270,11 +268,11 @@ integration-versioned: out/minikube ## Trigger minikube integration testing
.PHONY: test
test: pkg/minikube/assets/assets.go pkg/minikube/translate/translations.go ## Trigger minikube test
./test.sh
MINIKUBE_LDFLAGS="${MINIKUBE_LDFLAGS}" ./test.sh

.PHONY: gotest
gotest: $(SOURCE_GENERATED) ## Trigger minikube test
go test -tags "$(MINIKUBE_BUILD_TAGS)" $(MINIKUBE_TEST_FILES)
.PHONY: generate-docs
generate-docs: out/minikube ## Automatically generate commands documentation.
out/minikube generate-docs --path ./site/content/en/docs/commands/

.PHONY: extract
extract: ## Compile extract tool

@@ -526,14 +524,8 @@ kic-base-image: ## builds the base image used for kic.
docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base .

.PHONY: upload-preloaded-images-tar
upload-preloaded-images-tar: generate-preloaded-images-tar # Upload the preloaded images tar to the GCS bucket. Specify a specific kubernetes version to build via `KUBERNETES_VERSION=vx.y.z make upload-preloaded-images-tar`.
gsutil cp out/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4 gs://${PRELOADED_VOLUMES_GCS_BUCKET}
gsutil acl ch -u AllUsers:R gs://${PRELOADED_VOLUMES_GCS_BUCKET}/preloaded-images-k8s-${PRELOADED_TARBALL_VERSION}-${KUBERNETES_VERSION}-docker-overlay2.tar.lz4

.PHONY: generate-preloaded-images-tar
generate-preloaded-images-tar:
go run ./hack/preload-images/preload_images.go -kubernetes-version ${KUBERNETES_VERSION} -preloaded-tarball-version ${PRELOADED_TARBALL_VERSION}

upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS.
go run ./hack/preload-images/*.go

.PHONY: push-storage-provisioner-image
push-storage-provisioner-image: storage-provisioner-image ## Push storage-provisioner docker image using gcloud

@@ -633,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver
gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/
gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/

site/themes/docsy/assets/vendor/bootstrap/package.js:
site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule
git submodule update -f --init --recursive

out/hugo/hugo:
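The `$(shell ...)` version lookups near the top of the Makefile simply grep constants out of the Go source. Run from the repository root, they reduce to roughly the following (the file paths come straight from the Makefile lines above):

```bash
egrep "DefaultKubernetesVersion =" pkg/minikube/constants/constants.go | cut -d \" -f2   # KUBERNETES_VERSION
egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2                               # KIC_VERSION
egrep "PreloadVersion =" pkg/minikube/download/preload.go | cut -d \" -f2                # PRELOADED_TARBALL_VERSION
```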
README.md

@@ -2,6 +2,9 @@
[![BuildStatus Widget]][BuildStatus Result]
[![GoReport Widget]][GoReport Status]
[![Github All Releases](https://img.shields.io/github/downloads/kubernetes/minikube/total.svg)](https://github.com/kubernetes/minikube/releases/latest)
[![Latest Release](https://img.shields.io/github/v/release/kubernetes/minikube?include_prereleases)](https://github.com/kubernetes/minikube/releases/latest)

[BuildStatus Result]: https://travis-ci.org/kubernetes/minikube
[BuildStatus Widget]: https://travis-ci.org/kubernetes/minikube.svg?branch=master

@@ -13,7 +16,8 @@
minikube implements a local Kubernetes cluster on macOS, Linux, and Windows. minikube's [primary goals](https://minikube.sigs.k8s.io/docs/concepts/principles/) are to be the best tool for local Kubernetes application development and to support all Kubernetes features that fit.

<img src="https://github.com/kubernetes/minikube/raw/master/site/content/en/start.png" width="738" alt="screenshot">
<img src="https://raw.githubusercontent.com/kubernetes/minikube/master/site/static/images/screenshot.png" width="738" alt="screenshot">

## Features

@@ -48,7 +52,7 @@ See https://minikube.sigs.k8s.io/docs/
## More Examples

See our [examples page](https://minikube.sigs.k8s.io/docs/examples/)
See minikube in action [here](https://minikube.sigs.k8s.io/docs/handbook/controls/)

## Community

@@ -59,6 +63,6 @@ minikube is a Kubernetes [#sig-cluster-lifecycle](https://github.com/kubernetes/
* [minikube-dev mailing list](https://groups.google.com/forum/#!forum/minikube-dev)
* [Bi-weekly office hours, Mondays @ 11am PST](https://tinyurl.com/minikube-oh)

* [Contributing](https://minikube.sigs.k8s.io/docs/contributing/)
* [Development Roadmap](https://minikube.sigs.k8s.io/docs/contributing/roadmap/)
* [Contributing](https://minikube.sigs.k8s.io/docs/contrib/)
* [Development Roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/)

@@ -26,10 +26,10 @@ import (
"github.com/golang/glog"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
)

@@ -50,11 +50,12 @@ var addonsListCmd = &cobra.Command{
exit.UsageT("usage: minikube addons list")
}

_, cc := mustload.Partial(ClusterFlagValue())
switch strings.ToLower(addonListOutput) {
case "list":
printAddonsList()
printAddonsList(cc)
case "json":
printAddonsJSON()
printAddonsJSON(cc)
default:
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'list', 'json'", addonListOutput))
}

@@ -86,27 +87,24 @@ var stringFromStatus = func(addonStatus bool) string {
return "disabled"
}

var printAddonsList = func() {
var printAddonsList = func(cc *config.ClusterConfig) {
addonNames := make([]string, 0, len(assets.Addons))
for addonName := range assets.Addons {
addonNames = append(addonNames, addonName)
}
sort.Strings(addonNames)

var tData [][]string
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Addon Name", "Profile", "Status"})
table.SetAutoFormatHeaders(true)
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
table.SetCenterSeparator("|")
pName := viper.GetString(config.ProfileName)

for _, addonName := range addonNames {
addonBundle := assets.Addons[addonName]
addonStatus, err := addonBundle.IsEnabled(pName)
if err != nil {
out.WarningT("Unable to get addon status for {{.name}}: {{.error}}", out.V{"name": addonName, "error": err})
}
tData = append(tData, []string{addonName, pName, fmt.Sprintf("%s %s", stringFromStatus(addonStatus), iconFromStatus(addonStatus))})
enabled := addonBundle.IsEnabled(cc)
tData = append(tData, []string{addonName, cc.Name, fmt.Sprintf("%s %s", stringFromStatus(enabled), iconFromStatus(enabled))})
}

table.AppendBulk(tData)

@@ -121,9 +119,8 @@ var printAddonsList = func() {
}
}

var printAddonsJSON = func() {
var printAddonsJSON = func(cc *config.ClusterConfig) {
addonNames := make([]string, 0, len(assets.Addons))
pName := viper.GetString(config.ProfileName)
for addonName := range assets.Addons {
addonNames = append(addonNames, addonName)
}

@@ -133,16 +130,11 @@ var printAddonsJSON = func() {
for _, addonName := range addonNames {
addonBundle := assets.Addons[addonName]

addonStatus, err := addonBundle.IsEnabled(pName)
if err != nil {
glog.Errorf("Unable to get addon status for %s: %v", addonName, err)
continue
}
enabled := addonBundle.IsEnabled(cc)

addonsMap[addonName] = map[string]interface{}{
"Status": stringFromStatus(addonStatus),
"Profile": pName,
"Status": stringFromStatus(enabled),
"Profile": cc.Name,
}
}
jsonString, _ := json.Marshal(addonsMap)

@@ -18,9 +18,7 @@ package config
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
)

@@ -35,7 +33,7 @@ var addonsDisableCmd = &cobra.Command{
}

addon := args[0]
err := addons.Set(addon, "false", viper.GetString(config.ProfileName))
err := addons.SetAndSave(ClusterFlagValue(), addon, "false")
if err != nil {
exit.WithError("disable failed", err)
}

@@ -18,9 +18,7 @@ package config
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/addons"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
)

@@ -34,7 +32,7 @@ var addonsEnableCmd = &cobra.Command{
exit.UsageT("usage: minikube addons enable ADDON_NAME")
}
addon := args[0]
err := addons.Set(addon, "true", viper.GetString(config.ProfileName))
err := addons.SetAndSave(ClusterFlagValue(), addon, "true")
if err != nil {
exit.WithError("enable failed", err)
}

@@ -0,0 +1,27 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
)

// ClusterFlagValue returns the current cluster name based on flags
func ClusterFlagValue() string {
return viper.GetString(config.ProfileName)
}

@@ -21,7 +21,7 @@ import (
"fmt"

"github.com/spf13/cobra"
pkgConfig "k8s.io/minikube/pkg/minikube/config"
config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/out"
)

@@ -59,5 +59,5 @@ func init() {
// Get gets a property
func Get(name string) (string, error) {
return pkgConfig.Get(name)
return config.Get(name)
}

@@ -18,19 +18,13 @@ package config
import (
"fmt"
"os"
"text/template"

"github.com/pkg/browser"

"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
pkg_config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/browser"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/service"
)

@@ -62,36 +56,19 @@ var addonsOpenCmd = &cobra.Command{
exit.UsageT("usage: minikube addons open ADDON_NAME")
}
addonName := args[0]
// TODO(r2d4): config should not reference API, pull this out
api, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Error getting client", err)
}
defer api.Close()

profileName := viper.GetString(pkg_config.ProfileName)
cc, err := config.Load(profileName)
if err != nil {
exit.WithError("Error getting cluster", err)
}
cp, err := config.PrimaryControlPlane(cc)
if err != nil {
exit.WithError("Error getting control plane", err)
}
if !machine.IsRunning(api, driver.MachineName(*cc, cp)) {
os.Exit(1)
}
cname := ClusterFlagValue()
co := mustload.Healthy(cname)

addon, ok := assets.Addons[addonName] // validate addon input
if !ok {
exit.WithCodeT(exit.Data, `addon '{{.name}}' is not a valid addon packaged with minikube.
To see the list of available addons run:
minikube addons list`, out.V{"name": addonName})
}
ok, err = addon.IsEnabled(profileName)
if err != nil {
exit.WithError("IsEnabled failed", err)
}
if !ok {

enabled := addon.IsEnabled(co.Config)
if !enabled {
exit.WithCodeT(exit.Unavailable, `addon '{{.name}}' is currently not enabled.
To enable this addon run:
minikube addons enable {{.name}}`, out.V{"name": addonName})

@@ -112,7 +89,7 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa
svc := serviceList.Items[i].ObjectMeta.Name
var urlString []string

if urlString, err = service.WaitForService(api, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
if urlString, err = service.WaitForService(co.API, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err})
}

@@ -20,11 +20,10 @@ import (
"os"

"github.com/spf13/cobra"
"github.com/spf13/viper"
pkgConfig "k8s.io/minikube/pkg/minikube/config"
pkg_config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
)

@@ -35,7 +34,7 @@ var ProfileCmd = &cobra.Command{
Long: "profile sets the current minikube profile, or gets the current profile if no arguments are provided. This is used to run and manage multiple minikube instance. You can return to the default minikube profile by running `minikube profile default`",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
profile := viper.GetString(pkgConfig.ProfileName)
profile := ClusterFlagValue()
out.T(out.Empty, profile)
os.Exit(0)
}

@@ -49,9 +48,8 @@ var ProfileCmd = &cobra.Command{
we need to add code over here to check whether the profile
name is in the list of reserved keywords
*/
if pkgConfig.ProfileNameInReservedKeywords(profile) {
out.ErrT(out.FailureType, `Profile name "{{.profilename}}" is minikube keyword. To delete profile use command minikube delete -p <profile name> `, out.V{"profilename": profile})
os.Exit(0)
if config.ProfileNameInReservedKeywords(profile) {
exit.WithCodeT(exit.Config, `Profile name "{{.profilename}}" is reserved keyword. To delete this profile, run: "{{.cmd}}"`, out.V{"profilename": profile, "cmd": mustload.ExampleCmd(profile, "delete")})
}

if profile == "default" {

@@ -64,18 +62,18 @@ var ProfileCmd = &cobra.Command{
}
}

if !pkgConfig.ProfileExists(profile) {
if !config.ProfileExists(profile) {
out.ErrT(out.Tip, `if you want to create a profile you can by this command: minikube start -p {{.profile_name}}`, out.V{"profile_name": profile})
os.Exit(0)
}

err := Set(pkgConfig.ProfileName, profile)
err := Set(config.ProfileName, profile)
if err != nil {
exit.WithError("Setting profile failed", err)
}
cc, err := pkgConfig.Load(profile)
cc, err := config.Load(profile)
// might err when loading older version of cfg file that doesn't have KeepContext field
if err != nil && !pkg_config.IsNotExist(err) {
if err != nil && !config.IsNotExist(err) {
out.ErrT(out.Sad, `Error loading profile config: {{.error}}`, out.V{"error": err})
}
if err == nil {

@@ -91,13 +91,13 @@ var printProfilesTable = func() {
table.Render()

if invalidProfiles != nil {
out.T(out.Warning, "Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
out.WarningT("Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
for _, p := range invalidProfiles {
out.T(out.Empty, "\t "+p.Name)
out.ErrT(out.Empty, "\t "+p.Name)
}
out.T(out.Tip, "You can delete them using the following command(s): ")
out.ErrT(out.Tip, "You can delete them using the following command(s): ")
for _, p := range invalidProfiles {
out.String(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
}

}

@@ -19,7 +19,7 @@ package config
import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
pkgConfig "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/out"

@@ -61,11 +61,11 @@ func Set(name string, value string) error {
}

// Set the value
config, err := pkgConfig.ReadConfig(localpath.ConfigFile())
cc, err := config.ReadConfig(localpath.ConfigFile())
if err != nil {
return errors.Wrapf(err, "read config file %q", localpath.ConfigFile())
}
err = s.set(config, name, value)
err = s.set(cc, name, value)
if err != nil {
return errors.Wrapf(err, "set")
}

@@ -77,5 +77,5 @@ func Set(name string, value string) error {
}

// Write the value
return pkgConfig.WriteConfig(localpath.ConfigFile(), config)
return config.WriteConfig(localpath.ConfigFile(), cc)
}

@@ -18,7 +18,7 @@ package config
import (
"github.com/spf13/cobra"
pkgConfig "k8s.io/minikube/pkg/minikube/config"
config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
)

@@ -44,10 +44,10 @@ func init() {
// Unset unsets a property
func Unset(name string) error {
m, err := pkgConfig.ReadConfig(localpath.ConfigFile())
m, err := config.ReadConfig(localpath.ConfigFile())
if err != nil {
return err
}
delete(m, name)
return pkgConfig.WriteConfig(localpath.ConfigFile(), m)
return config.WriteConfig(localpath.ConfigFile(), m)
}

@@ -20,11 +20,11 @@ import (
"fmt"
"testing"

pkgConfig "k8s.io/minikube/pkg/minikube/config"
config "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
)

var minikubeConfig = pkgConfig.MinikubeConfig{
var minikubeConfig = config.MinikubeConfig{
"driver": driver.KVM2,
"cpus": 12,
"show-libmachine-logs": true,

@@ -83,21 +83,10 @@ func TestSetBool(t *testing.T) {
}

func TestValidateProfile(t *testing.T) {
testCases := []struct {
profileName string
}{
{
profileName: "82374328742_2974224498",
},
{
profileName: "validate_test",
},
}

for _, test := range testCases {
profileNam := test.profileName
expected := fmt.Sprintf("profile %q not found", test.profileName)
err, ok := ValidateProfile(profileNam)
testCases := []string{"82374328742_2974224498", "validate_test"}
for _, name := range testCases {
expected := fmt.Sprintf("profile %q not found", name)
err, ok := ValidateProfile(name)
if !ok && err.Error() != expected {
t.Errorf("got error %q, expected %q", err, expected)
}

@@ -40,7 +40,7 @@ func IsValidDriver(string, name string) error {
// RequiresRestartMsg returns the "requires restart" message
func RequiresRestartMsg(string, string) error {
out.T(out.Warning, "These changes will take effect upon a minikube delete and then a minikube start")
out.WarningT("These changes will take effect upon a minikube delete and then a minikube start")
return nil
}
@ -21,25 +21,20 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/docker/machine/libmachine/mcnerror"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/browser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
pkgaddons "k8s.io/minikube/pkg/addons"
|
||||
"k8s.io/minikube/pkg/addons"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/browser"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/proxy"
|
||||
"k8s.io/minikube/pkg/minikube/service"
|
||||
|
@ -59,68 +54,27 @@ var dashboardCmd = &cobra.Command{
|
|||
Short: "Access the kubernetes dashboard running within the minikube cluster",
|
||||
Long: `Access the kubernetes dashboard running within the minikube cluster`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cc, err := pkg_config.Load(profileName)
|
||||
if err != nil && !pkg_config.IsNotExist(err) {
|
||||
exit.WithError("Error loading profile config", err)
|
||||
}
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Healthy(cname)
|
||||
|
||||
if err != nil {
|
||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": profileName})
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
defer func() {
|
||||
err := api.Close()
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to close API: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(cc)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cc, cp)
|
||||
if _, err = api.Load(machineName); err != nil {
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
exit.WithCodeT(exit.Unavailable, "{{.name}} cluster does not exist", out.V{"name": cc.Name})
|
||||
default:
|
||||
exit.WithError("Error getting cluster", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, n := range cc.Nodes {
|
||||
err = proxy.ExcludeIP(n.IP) // to be used for http get calls
|
||||
if err != nil {
|
||||
for _, n := range co.Config.Nodes {
|
||||
if err := proxy.ExcludeIP(n.IP); err != nil {
|
||||
glog.Errorf("Error excluding IP from proxy: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
kubectl, err := exec.LookPath("kubectl")
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
|
||||
}
|
||||
|
||||
if !machine.IsRunning(api, machineName) {
|
||||
os.Exit(1)
|
||||
}
|
||||
kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion
|
||||
var err error
|
||||
|
||||
// Check dashboard status before enabling it
|
||||
dashboardAddon := assets.Addons["dashboard"]
|
||||
dashboardStatus, _ := dashboardAddon.IsEnabled(profileName)
|
||||
if !dashboardStatus {
|
||||
addon := assets.Addons["dashboard"]
|
||||
enabled := addon.IsEnabled(co.Config)
|
||||
|
||||
if !enabled {
|
||||
// Send status messages to stderr for folks re-using this output.
|
||||
out.ErrT(out.Enabling, "Enabling dashboard ...")
|
||||
// Enable the dashboard add-on
|
||||
err = pkgaddons.Set("dashboard", "true", profileName)
|
||||
err = addons.SetAndSave(cname, "dashboard", "true")
|
||||
if err != nil {
|
||||
exit.WithError("Unable to enable dashboard", err)
|
||||
}
|
||||
|
@ -135,7 +89,7 @@ var dashboardCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
out.ErrT(out.Launch, "Launching proxy ...")
|
||||
p, hostPort, err := kubectlProxy(kubectl, machineName)
|
||||
p, hostPort, err := kubectlProxy(kubectlVersion, cname)
|
||||
if err != nil {
|
||||
exit.WithError("kubectl proxy", err)
|
||||
}
|
||||
|
@ -169,10 +123,17 @@ var dashboardCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
// kubectlProxy runs "kubectl proxy", returning host:port
|
||||
func kubectlProxy(path string, machineName string) (*exec.Cmd, string, error) {
|
||||
func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) {
|
||||
// port=0 picks a random system port
|
||||
|
||||
cmd := exec.Command(path, "--context", machineName, "proxy", "--port=0")
|
||||
kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"}
|
||||
|
||||
var cmd *exec.Cmd
|
||||
if kubectl, err := exec.LookPath("kubectl"); err == nil {
|
||||
cmd = exec.Command(kubectl, kubectlArgs...)
|
||||
} else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
stdoutPipe, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
|
|
|
@ -141,10 +141,10 @@ func runDelete(cmd *cobra.Command, args []string) {
exit.UsageT("usage: minikube delete")
}

profileName := viper.GetString(config.ProfileName)
profile, err := config.LoadProfile(profileName)
cname := ClusterFlagValue()
profile, err := config.LoadProfile(cname)
if err != nil {
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": profileName})
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cname})
}

errs := DeleteProfiles([]*config.Profile{profile})

@ -208,7 +208,13 @@ func deleteProfileContainersAndVolumes(name string) {

func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
deleteProfileContainersAndVolumes(profile.Name)
if profile.Config != nil {
// if driver is oci driver, delete containers and volumes
if driver.IsKIC(profile.Config.Driver) {
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": profile.Name, "driver_name": profile.Config.Driver})
deleteProfileContainersAndVolumes(profile.Name)
}
}

api, err := machine.NewAPIClient()
if err != nil {

@ -236,7 +242,7 @@ func deleteProfile(profile *config.Profile) error {
}

if err := killMountProcess(); err != nil {
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
}

deleteHosts(api, cc)

@ -264,7 +270,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.FailureT("Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
}
}

@ -272,13 +278,13 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
}
}

func deleteConfig(profileName string) error {
if err := config.DeleteProfile(profileName); err != nil {
func deleteConfig(cname string) error {
if err := config.DeleteProfile(cname); err != nil {
if config.IsNotExist(err) {
delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
delErr := profileDeletionErr(cname, fmt.Sprintf("\"%s\" profile does not exist", cname))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
delErr := profileDeletionErr(cname, fmt.Sprintf("failed to remove profile %v", err))
return DeletionError{Err: delErr, Errtype: Fatal}
}
return nil

@ -317,8 +323,8 @@ func deleteInvalidProfile(profile *config.Profile) []error {
return errs
}

func profileDeletionErr(profileName string, additionalInfo string) error {
return fmt.Errorf("error deleting profile \"%s\": %s", profileName, additionalInfo)
func profileDeletionErr(cname string, additionalInfo string) error {
return fmt.Errorf("error deleting profile \"%s\": %s", cname, additionalInfo)
}

func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.Node, bsName string) error {

@ -402,7 +408,7 @@ func deleteProfileDirectory(profile string) {
out.T(out.DeletingHost, `Removing {{.directory}} ...`, out.V{"directory": machineDir})
err := os.RemoveAll(machineDir)
if err != nil {
exit.WithError("Unable to remove machine directory: %v", err)
exit.WithError("Unable to remove machine directory", err)
}
}
}
@ -27,19 +27,17 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/shell"
|
||||
"k8s.io/minikube/pkg/minikube/sysinit"
|
||||
)
|
||||
|
||||
var dockerEnvTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}", constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv)
|
||||
|
@ -117,18 +115,8 @@ func (EnvNoProxyGetter) GetNoProxyVar() (string, string) {
|
|||
}
|
||||
|
||||
// isDockerActive checks if Docker is active
|
||||
func isDockerActive(d drivers.Driver) (bool, error) {
|
||||
client, err := drivers.GetSSHClientFromDriver(d)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
output, err := client.Output("sudo systemctl is-active docker")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// systemd returns error code on inactive
|
||||
s := strings.TrimSpace(output)
|
||||
return err == nil && s == "active", nil
|
||||
func isDockerActive(r command.Runner) bool {
|
||||
return sysinit.New(r).Active("docker")
|
||||
}
|
||||
|
||||
// dockerEnvCmd represents the docker-env command
|
||||
|
@ -137,88 +125,62 @@ var dockerEnvCmd = &cobra.Command{
|
|||
Short: "Sets up docker env variables; similar to '$(docker-machine env)'",
|
||||
Long: `Sets up docker env variables; similar to '$(docker-machine env)'.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
driverName := co.CP.Host.DriverName
|
||||
|
||||
if driverName == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube docker-env' command`)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
if co.Config.KubernetesConfig.ContainerRuntime != "docker" {
|
||||
exit.WithCodeT(exit.BadUsage, `The docker-env command is only compatible with the "docker" runtime, but this cluster was configured to use the "{{.runtime}}" runtime.`,
|
||||
out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
|
||||
}
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.LoadHost(api, machineName)
|
||||
|
||||
if ok := isDockerActive(co.CP.Runner); !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname})
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
|
||||
var err error
|
||||
port := constants.DockerDaemonPort
|
||||
if driver.NeedsPortForward(driverName) {
|
||||
port, err = oci.ForwardedPort(driverName, cname, port)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube docker-env' command`)
|
||||
exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err})
|
||||
}
|
||||
}
|
||||
|
||||
hostSt, err := machine.Status(api, machineName)
|
||||
ec := DockerEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: cname,
|
||||
driver: driverName,
|
||||
hostIP: co.CP.IP.String(),
|
||||
port: port,
|
||||
certsDir: localpath.MakeMiniPath("certs"),
|
||||
noProxy: noProxy,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isDockerActive(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The docker service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
if dockerUnset {
|
||||
if err := dockerUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
hostIP, err := host.Driver.GetIP()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host IP", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
|
||||
port := constants.DockerDaemonPort
|
||||
if driver.IsKIC(host.DriverName) { // for kic we need to find what port docker/podman chose for us
|
||||
hostIP = oci.DefaultBindIPV4
|
||||
port, err = oci.ForwardedPort(host.DriverName, profile, port)
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": host.DriverName, "error": err})
|
||||
}
|
||||
}
|
||||
|
||||
ec := DockerEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
hostIP: hostIP,
|
||||
port: port,
|
||||
certsDir: localpath.MakeMiniPath("certs"),
|
||||
noProxy: noProxy,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
if dockerUnset {
|
||||
if err := dockerUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := dockerSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
if err := dockerSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
"os"

"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"k8s.io/minikube/pkg/generate"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
)

@ -43,7 +43,7 @@ var generateDocs = &cobra.Command{
}

// generate docs
if err := doc.GenMarkdownTree(RootCmd, path); err != nil {
if err := generate.Docs(RootCmd, path); err != nil {
exit.WithError("Unable to generate docs", err)
}
out.T(out.Documentation, "Docs have been saved at - {{.path}}", out.V{"path": path})
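For context, the doc.GenMarkdownTree call being replaced comes from cobra's doc package. A minimal sketch of generating markdown docs for a cobra command tree into a directory (the command names and output path are illustrative):

package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "example", Short: "An example command"}
	root.AddCommand(&cobra.Command{Use: "sub", Short: "A subcommand"})

	// GenMarkdownTree writes one markdown file per command into the directory,
	// which must already exist.
	if err := os.MkdirAll("./docs", 0o755); err != nil {
		log.Fatal(err)
	}
	if err := doc.GenMarkdownTree(root, "./docs"); err != nil {
		log.Fatal(err)
	}
}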
@ -0,0 +1,51 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
"fmt"
"io/ioutil"
"path/filepath"
"testing"

"github.com/google/go-cmp/cmp"
"k8s.io/minikube/pkg/generate"
)

func TestGenerateDocs(t *testing.T) {
dir := "../../../site/content/en/docs/commands/"

for _, sc := range RootCmd.Commands() {
t.Run(sc.Name(), func(t *testing.T) {
if sc.Hidden {
t.Skip()
}
fp := filepath.Join(dir, fmt.Sprintf("%s.md", sc.Name()))
expectedContents, err := ioutil.ReadFile(fp)
if err != nil {
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %v", err)
}
actualContents, err := generate.DocForCommand(sc)
if err != nil {
t.Fatalf("error getting contents: %v", err)
}
if diff := cmp.Diff(actualContents, string(expectedContents)); diff != "" {
t.Fatalf("Docs are not updated. Please run `make generate-docs` to update commands documentation: %s", diff)
}
})
}
}
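The new test above is a golden-file check: regenerate each command's docs and diff them against the committed markdown. A stripped-down sketch of the same pattern, with a stand-in generator in place of generate.DocForCommand and an illustrative file path:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/google/go-cmp/cmp"
)

// renderDoc stands in for generate.DocForCommand.
func renderDoc(name string) string {
	return "# " + name + "\n"
}

func main() {
	// Path is illustrative; the real test reads site/content/en/docs/commands/<cmd>.md.
	want, err := ioutil.ReadFile("docs/example.md")
	if err != nil {
		fmt.Println("golden file missing; regenerate the docs first")
		return
	}
	if diff := cmp.Diff(renderDoc("example"), string(want)); diff != "" {
		fmt.Printf("docs are stale:\n%s\n", diff)
	}
}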
@ -17,14 +17,8 @@ limitations under the License.
package cmd

import (
"github.com/docker/machine/libmachine/mcnerror"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
)

@ -34,32 +28,7 @@ var ipCmd = &cobra.Command{
Short: "Retrieves the IP address of the running cluster",
Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`,
Run: func(cmd *cobra.Command, args []string) {
api, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Error getting client", err)
}
defer api.Close()

cc, err := config.Load(viper.GetString(config.ProfileName))
if err != nil {
exit.WithError("Error getting config", err)
}
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
host, err := api.Load(machineName)
if err != nil {
switch err := errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
exit.WithCodeT(exit.NoInput, `"{{.profile_name}}" host does not exist, unable to show an IP`, out.V{"profile_name": cc.Name})
default:
exit.WithError("Error getting host", err)
}
}
ip, err := host.Driver.GetIP()
if err != nil {
exit.WithError("Error getting IP", err)
}
out.Ln(ip)
}
co := mustload.Running(ClusterFlagValue())
out.Ln(co.CP.IP.String())
},
}
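The rewritten ip command shows the pattern this change keeps applying: the hand-rolled sequence of NewAPIClient, config.Load, host lookup, and state checks collapses into a single mustload call that either returns a running cluster or exits with a consistent message. A toy illustration of that load-or-exit helper shape (the types and values below are invented for the sketch, not minikube's API):

package main

import (
	"fmt"
	"os"
)

// runningCluster is a stand-in for the value mustload.Running returns.
type runningCluster struct {
	Name string
	IP   string
}

// mustRunning bundles the load-and-validate steps behind one call and exits
// on failure, so each command no longer repeats the same error handling.
func mustRunning(name string) runningCluster {
	if name == "" {
		fmt.Fprintln(os.Stderr, "no cluster selected")
		os.Exit(1)
	}
	// A real implementation would load config and query the driver here.
	return runningCluster{Name: name, IP: "192.168.49.2"}
}

func main() {
	co := mustRunning("minikube")
	fmt.Println(co.IP) // roughly what `minikube ip` now prints via co.CP.IP
}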
@ -24,10 +24,8 @@ import (

"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
)

@ -42,30 +40,15 @@ Examples:
minikube kubectl -- --help
minikube kubectl -- get pods --namespace kube-system`,
Run: func(cmd *cobra.Command, args []string) {
api, err := machine.NewAPIClient()
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting client: %v\n", err)
os.Exit(1)
}
defer api.Close()
co := mustload.Healthy(ClusterFlagValue())

cc, err := config.Load(viper.GetString(config.ProfileName))
if err != nil && !config.IsNotExist(err) {
out.ErrLn("Error loading profile config: %v", err)
}

version := constants.DefaultKubernetesVersion
if cc != nil {
version = cc.KubernetesConfig.KubernetesVersion
}

path, err := node.CacheKubectlBinary(version)
version := co.Config.KubernetesConfig.KubernetesVersion
c, err := KubectlCommand(version, args...)
if err != nil {
out.ErrLn("Error caching kubectl: %v", err)
}

glog.Infof("Running %s %v", path, args)
c := exec.Command(path, args...)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr

@ -82,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`,
}
},
}

// KubectlCommand will return kubectl command with a version matching the cluster
func KubectlCommand(version string, args ...string) (*exec.Cmd, error) {
if version == "" {
version = constants.DefaultKubernetesVersion
}

path, err := node.CacheKubectlBinary(version)
if err != nil {
return nil, err
}

return exec.Command(path, args...), nil
}
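Once KubectlCommand has resolved a version-matched binary, the resulting command is wired straight to the caller's terminal and run. A small sketch of that wiring, using plain `kubectl` from the PATH as a stand-in for the cached binary:

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Stand-in for the *exec.Cmd returned by KubectlCommand(version, args...).
	c := exec.Command("kubectl", "version", "--client=true")
	c.Stdin = os.Stdin
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	if err := c.Run(); err != nil {
		log.Fatalf("kubectl failed: %v", err)
	}
}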
|
@ -17,22 +17,22 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/logs"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
const (
|
||||
// number of problems per log to output
|
||||
numberOfProblems = 5
|
||||
numberOfProblems = 10
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -51,64 +51,35 @@ var logsCmd = &cobra.Command{
|
|||
Short: "Gets the logs of the running instance, used for debugging minikube, not user code.",
|
||||
Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cfg, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
co := mustload.Running(ClusterFlagValue())
|
||||
|
||||
if nodeName == "" {
|
||||
cp, err := config.PrimaryControlPlane(cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
nodeName = cp.Name
|
||||
}
|
||||
|
||||
n, _, err := node.Retrieve(cfg, nodeName)
|
||||
if err != nil {
|
||||
exit.WithError("Error retrieving node", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cfg, *n)
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
h, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
exit.WithError("api load", err)
|
||||
}
|
||||
runner, err := machine.CommandRunner(h)
|
||||
if err != nil {
|
||||
exit.WithError("command runner", err)
|
||||
}
|
||||
bs, err := cluster.Bootstrapper(api, viper.GetString(cmdcfg.Bootstrapper), *cfg, *n)
|
||||
bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CP.Node)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting cluster bootstrapper", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: runner})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CP.Runner})
|
||||
if err != nil {
|
||||
exit.WithError("Unable to get runtime", err)
|
||||
}
|
||||
if followLogs {
|
||||
err := logs.Follow(cr, bs, runner)
|
||||
err := logs.Follow(cr, bs, *co.Config, co.CP.Runner)
|
||||
if err != nil {
|
||||
exit.WithError("Follow", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if showProblems {
|
||||
problems := logs.FindProblems(cr, bs, runner)
|
||||
problems := logs.FindProblems(cr, bs, *co.Config, co.CP.Runner)
|
||||
logs.OutputProblems(problems, numberOfProblems)
|
||||
return
|
||||
}
|
||||
err = logs.Output(cr, bs, runner, numberOfLines)
|
||||
err = logs.Output(cr, bs, *co.Config, co.CP.Runner, numberOfLines)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting machine logs", err)
|
||||
out.Ln("")
|
||||
// Avoid exit.WithError, since it outputs the issue URL
|
||||
out.WarningT("{{.error}}", out.V{"error": err})
|
||||
os.Exit(exit.Unavailable)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@ -30,12 +30,10 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/third_party/go9p/ufs"
|
||||
)
|
||||
|
@ -99,30 +97,16 @@ var mountCmd = &cobra.Command{
|
|||
if glog.V(1) {
|
||||
debugVal = 1 // ufs.StartServer takes int debug param
|
||||
}
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(cc)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary cp", err)
|
||||
}
|
||||
host, err := api.Load(driver.MachineName(*cc, cp))
|
||||
if err != nil {
|
||||
exit.WithError("Error loading api", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
co := mustload.Running(ClusterFlagValue())
|
||||
if co.CP.Host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube mount' command`)
|
||||
}
|
||||
|
||||
var ip net.IP
|
||||
var err error
|
||||
if mountIP == "" {
|
||||
ip, err = cluster.GetVMHostIP(host)
|
||||
ip, err = cluster.GetVMHostIP(co.CP.Host)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting the host IP address to use from within the VM", err)
|
||||
}
|
||||
|
@ -159,11 +143,11 @@ var mountCmd = &cobra.Command{
|
|||
|
||||
// An escape valve to allow future hackers to try NFS, VirtFS, or other FS types.
|
||||
if !supportedFilesystems[cfg.Type] {
|
||||
out.T(out.Warning, "{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
|
||||
out.WarningT("{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
|
||||
}
|
||||
|
||||
bindIP := ip.String() // the ip to listen on the user's host machine
|
||||
if driver.IsKIC(host.Driver.DriverName()) && runtime.GOOS != "linux" {
|
||||
if driver.IsKIC(co.CP.Host.Driver.DriverName()) && runtime.GOOS != "linux" {
|
||||
bindIP = "127.0.0.1"
|
||||
}
|
||||
out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath})
|
||||
|
@ -187,27 +171,21 @@ var mountCmd = &cobra.Command{
|
|||
}()
|
||||
}
|
||||
|
||||
// Use CommandRunner, as the native docker ssh service dies when Ctrl-C is received.
|
||||
runner, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
// Unmount if Ctrl-C or kill request is received.
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
for sig := range c {
|
||||
out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath})
|
||||
err := cluster.Unmount(runner, vmPath)
|
||||
err := cluster.Unmount(co.CP.Runner, vmPath)
|
||||
if err != nil {
|
||||
out.ErrT(out.FailureType, "Failed unmount: {{.error}}", out.V{"error": err})
|
||||
out.FailureT("Failed unmount: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
exit.WithCodeT(exit.Interrupted, "Received {{.name}} signal", out.V{"name": sig})
|
||||
}
|
||||
}()
|
||||
|
||||
err = cluster.Mount(runner, ip.String(), vmPath, cfg)
|
||||
err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg)
|
||||
if err != nil {
|
||||
exit.WithError("mount failed", err)
|
||||
}
|
||||
|
|
|
@ -23,10 +23,9 @@ import (

// nodeCmd represents the set of node subcommands
var nodeCmd = &cobra.Command{
Use: "node",
Short: "Node operations",
Long: "Operations on nodes",
Hidden: true, // This won't be fully functional and thus should not be documented yet
Use: "node",
Short: "Node operations",
Long: "Operations on nodes",
Run: func(cmd *cobra.Command, args []string) {
exit.UsageT("Usage: minikube node [add|start|stop|delete]")
},
@ -17,13 +17,11 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
@ -37,39 +35,41 @@ var nodeAddCmd = &cobra.Command{
|
|||
Short: "Adds a node to the given cluster.",
|
||||
Long: "Adds a node to the given cluster config, and starts it.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
co := mustload.Healthy(ClusterFlagValue())
|
||||
cc := co.Config
|
||||
|
||||
if driver.BareMetal(cc.Driver) {
|
||||
out.FailureT("none driver does not support multi-node clusters")
|
||||
}
|
||||
|
||||
//name := profile + strconv.Itoa(len(mc.Nodes)+1)
|
||||
name := fmt.Sprintf("m%d", len(cc.Nodes)+1)
|
||||
name := node.Name(len(cc.Nodes) + 1)
|
||||
|
||||
out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile})
|
||||
out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name})
|
||||
|
||||
n, err := node.Add(cc, name, cp, worker, "", profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error adding node to cluster", err)
|
||||
// TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here.
|
||||
n := config.Node{
|
||||
Name: name,
|
||||
Worker: worker,
|
||||
ControlPlane: cp,
|
||||
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
|
||||
}
|
||||
|
||||
_, err = node.Start(*cc, *n, false, nil)
|
||||
if err != nil {
|
||||
exit.WithError("Error starting node", err)
|
||||
if err := node.Add(cc, n); err != nil {
|
||||
_, err := maybeDeleteAndRetry(*cc, n, nil, err)
|
||||
if err != nil {
|
||||
exit.WithError("failed to add node", err)
|
||||
}
|
||||
}
|
||||
|
||||
out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": profile})
|
||||
out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import
|
||||
nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
|
||||
nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
|
||||
//We should figure out which of these flags to actually import
|
||||
startCmd.Flags().Visit(
|
||||
func(f *pflag.Flag) {
|
||||
nodeAddCmd.Flags().AddFlag(f)
|
||||
},
|
||||
)
|
||||
nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
|
||||
|
||||
nodeCmd.AddCommand(nodeAddCmd)
|
||||
}
|
||||
|
|
|
@ -18,9 +18,8 @@ package cmd

import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/out"
)

@ -36,17 +35,11 @@ var nodeDeleteCmd = &cobra.Command{
}
name := args[0]

profile := viper.GetString(config.ProfileName)
out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": profile})
co := mustload.Healthy(ClusterFlagValue())
out.T(out.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name})

cc, err := config.Load(profile)
if err != nil {
exit.WithError("loading config", err)
}

err = node.Delete(*cc, name)
if err != nil {
out.FatalT("Failed to delete node {{.name}}", out.V{"name": name})
if err := node.Delete(*co.Config, name); err != nil {
exit.WithError("deleting node", err)
}

out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name})
@ -20,10 +20,9 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
@ -37,38 +36,46 @@ var nodeStartCmd = &cobra.Command{
|
|||
exit.UsageT("Usage: minikube node start [name]")
|
||||
}
|
||||
|
||||
api, cc := mustload.Partial(ClusterFlagValue())
|
||||
name := args[0]
|
||||
|
||||
// Make sure it's not running
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("creating api client", err)
|
||||
}
|
||||
|
||||
if machine.IsRunning(api, name) {
|
||||
out.T(out.Check, "{{.name}} is already running", out.V{"name": name})
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("loading config", err)
|
||||
}
|
||||
|
||||
n, _, err := node.Retrieve(cc, name)
|
||||
if err != nil {
|
||||
exit.WithError("retrieving node", err)
|
||||
}
|
||||
|
||||
// Start it up baby
|
||||
_, err = node.Start(*cc, *n, false, nil)
|
||||
r, p, m, h, err := node.Provision(cc, n, false)
|
||||
if err != nil {
|
||||
out.FatalT("Failed to start node {{.name}}", out.V{"name": name})
|
||||
exit.WithError("provisioning host for node", err)
|
||||
}
|
||||
|
||||
s := node.Starter{
|
||||
Runner: r,
|
||||
PreExists: p,
|
||||
MachineAPI: m,
|
||||
Host: h,
|
||||
Cfg: cc,
|
||||
Node: n,
|
||||
ExistingAddons: nil,
|
||||
}
|
||||
|
||||
_, err = node.Start(s, false)
|
||||
if err != nil {
|
||||
_, err := maybeDeleteAndRetry(*cc, *n, nil, err)
|
||||
if err != nil {
|
||||
exit.WithError("failed to start node", err)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
nodeStartCmd.Flags().String("name", "", "The name of the node to start")
|
||||
nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
|
||||
nodeCmd.AddCommand(nodeStartCmd)
|
||||
}
|
||||
|
|
|
@ -18,11 +18,10 @@ package cmd
|
|||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
@ -37,16 +36,7 @@ var nodeStopCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
name := args[0]
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("creating api client", err)
|
||||
}
|
||||
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("getting config", err)
|
||||
}
|
||||
api, cc := mustload.Partial(ClusterFlagValue())
|
||||
|
||||
n, _, err := node.Retrieve(cc, name)
|
||||
if err != nil {
|
||||
|
|
|
@ -37,10 +37,9 @@ var optionsCmd = &cobra.Command{
// runOptions handles the executes the flow of "minikube options"
func runOptions(cmd *cobra.Command, args []string) {
out.String("The following options can be passed to any command:\n\n")
for _, flagName := range viperWhiteList {
f := pflag.Lookup(flagName)
out.String(flagUsage(f))
}
cmd.Root().PersistentFlags().VisitAll(func(flag *pflag.Flag) {
out.String(flagUsage(flag))
})
}

func flagUsage(flag *pflag.Flag) string {
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
@ -25,11 +24,12 @@ import (
|
|||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
@ -46,27 +46,10 @@ var pauseCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func runPause(cmd *cobra.Command, args []string) {
|
||||
cname := viper.GetString(config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
cc, err := config.Load(cname)
|
||||
co := mustload.Running(ClusterFlagValue())
|
||||
|
||||
if err != nil && !config.IsNotExist(err) {
|
||||
exit.WithError("Error loading profile config", err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname})
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
glog.Infof("config: %+v", cc)
|
||||
|
||||
for _, n := range cc.Nodes {
|
||||
host, err := machine.LoadHost(api, driver.MachineName(*cc, n))
|
||||
for _, n := range co.Config.Nodes {
|
||||
host, err := machine.LoadHost(co.API, driver.MachineName(*co.Config, n))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
@ -76,7 +59,7 @@ func runPause(cmd *cobra.Command, args []string) {
|
|||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
@ -102,6 +85,6 @@ func runPause(cmd *cobra.Command, args []string) {
|
|||
}
|
||||
|
||||
func init() {
|
||||
pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to pause")
|
||||
pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to pause")
|
||||
pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
|
||||
}
|
||||
|
|
|
@ -27,16 +27,13 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/docker/machine/libmachine/host"
|
||||
"github.com/docker/machine/libmachine/ssh"
|
||||
"github.com/docker/machine/libmachine/state"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/shell"
|
||||
)
|
||||
|
@ -67,15 +64,16 @@ func podmanShellCfgSet(ec PodmanEnvConfig, envMap map[string]string) *PodmanShel
|
|||
}
|
||||
|
||||
// isPodmanAvailable checks if Podman is available
|
||||
func isPodmanAvailable(host *host.Host) (bool, error) {
|
||||
// we need both "varlink bridge" and "podman varlink"
|
||||
if _, err := host.RunSSHCommand("which varlink"); err != nil {
|
||||
return false, err
|
||||
func isPodmanAvailable(r command.Runner) bool {
|
||||
if _, err := r.RunCmd(exec.Command("which", "varlink")); err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err := host.RunSSHCommand("which podman"); err != nil {
|
||||
return false, err
|
||||
|
||||
if _, err := r.RunCmd(exec.Command("which", "podman")); err != nil {
|
||||
return false
|
||||
}
|
||||
return true, nil
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func createExternalSSHClient(d drivers.Driver) (*ssh.ExternalClient, error) {
|
||||
|
@ -108,75 +106,49 @@ var podmanEnvCmd = &cobra.Command{
|
|||
Short: "Sets up podman env variables; similar to '$(podman-machine env)'",
|
||||
Long: `Sets up podman env variables; similar to '$(podman-machine env)'.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
driverName := co.CP.Host.DriverName
|
||||
|
||||
if driverName == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube podman-env' command`)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
profile := viper.GetString(config.ProfileName)
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
if ok := isPodmanAvailable(co.CP.Runner); !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})
|
||||
}
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.LoadHost(api, machineName)
|
||||
|
||||
client, err := createExternalSSHClient(co.CP.Host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting ssh client", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
ec := PodmanEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: cname,
|
||||
driver: driverName,
|
||||
client: client,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube podman-env' command`)
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
hostSt, err := machine.Status(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host status", err)
|
||||
}
|
||||
if hostSt != state.Running.String() {
|
||||
exit.WithCodeT(exit.Unavailable, `'{{.profile}}' is not running`, out.V{"profile": profile})
|
||||
}
|
||||
ok, err := isPodmanAvailable(host)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting service status", err)
|
||||
if podmanUnset {
|
||||
if err := podmanUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The podman service within '{{.profile}}' is not active`, out.V{"profile": profile})
|
||||
}
|
||||
|
||||
client, err := createExternalSSHClient(host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting ssh client", err)
|
||||
}
|
||||
|
||||
sh := shell.EnvConfig{
|
||||
Shell: shell.ForceShell,
|
||||
}
|
||||
ec := PodmanEnvConfig{
|
||||
EnvConfig: sh,
|
||||
profile: profile,
|
||||
driver: host.DriverName,
|
||||
client: client,
|
||||
}
|
||||
|
||||
if ec.Shell == "" {
|
||||
ec.Shell, err = shell.Detect()
|
||||
if err != nil {
|
||||
exit.WithError("Error detecting shell", err)
|
||||
}
|
||||
}
|
||||
|
||||
if podmanUnset {
|
||||
if err := podmanUnsetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating unset output", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := podmanSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
if err := podmanSetScript(ec, os.Stdout); err != nil {
|
||||
exit.WithError("Error generating set output", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -29,17 +30,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/browser"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/browser"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/service"
|
||||
"k8s.io/minikube/pkg/minikube/tunnel/kic"
|
||||
|
@ -77,33 +74,22 @@ var serviceCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
svc := args[0]
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cfg, err := config.Load(profileName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
cp, err := config.PrimaryControlPlane(cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting control plane", err)
|
||||
}
|
||||
machineName := driver.MachineName(*cfg, cp)
|
||||
if !machine.IsRunning(api, machineName) {
|
||||
os.Exit(1)
|
||||
}
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Healthy(cname)
|
||||
|
||||
if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker {
|
||||
startKicServiceTunnel(svc, cfg.Name)
|
||||
if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker {
|
||||
startKicServiceTunnel(svc, cname)
|
||||
return
|
||||
}
|
||||
|
||||
urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
|
||||
urls, err := service.WaitForService(co.API, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
|
||||
if err != nil {
|
||||
var s *service.SVCNotFoundError
|
||||
if errors.As(err, &s) {
|
||||
exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.
|
||||
You may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace})
|
||||
}
|
||||
exit.WithError("Error opening service", err)
|
||||
}
|
||||
|
||||
|
@ -151,7 +137,7 @@ func startKicServiceTunnel(svc, configName string) {
|
|||
service.PrintServiceList(os.Stdout, data)
|
||||
|
||||
openURLs(svc, urls)
|
||||
out.T(out.Warning, "Because you are using docker driver on Mac, the terminal needs to be open to run it.")
|
||||
out.WarningT("Because you are using docker driver on Mac, the terminal needs to be open to run it.")
|
||||
|
||||
<-ctrlC
|
||||
|
||||
|
|
|
@ -22,14 +22,10 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
core "k8s.io/api/core/v1"
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/service"
|
||||
)
|
||||
|
@ -42,24 +38,9 @@ var serviceListCmd = &cobra.Command{
|
|||
Short: "Lists the URLs for the services in your local cluster",
|
||||
Long: `Lists the URLs for the services in your local cluster`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
profileName := viper.GetString(pkg_config.ProfileName)
|
||||
cfg, err := config.Load(profileName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
cp, err := config.PrimaryControlPlane(cfg)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
if !machine.IsRunning(api, driver.MachineName(*cfg, cp)) {
|
||||
exit.WithCodeT(exit.Unavailable, "profile {{.name}} is not running.", out.V{"name": profileName})
|
||||
}
|
||||
serviceURLs, err := service.GetServiceURLs(api, serviceListNamespace, serviceURLTemplate)
|
||||
co := mustload.Healthy(ClusterFlagValue())
|
||||
|
||||
serviceURLs, err := service.GetServiceURLs(co.API, serviceListNamespace, serviceURLTemplate)
|
||||
if err != nil {
|
||||
out.FatalT("Failed to get service URL: {{.error}}", out.V{"error": err})
|
||||
out.ErrT(out.Notice, "Check that minikube is running and that you have specified the correct namespace (-n flag) if required.")
|
||||
|
@ -71,14 +52,15 @@ var serviceListCmd = &cobra.Command{
|
|||
if len(serviceURL.URLs) == 0 {
|
||||
data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "No node port"})
|
||||
} else {
|
||||
servicePortNames := strings.Join(serviceURL.PortNames, "\n")
|
||||
serviceURLs := strings.Join(serviceURL.URLs, "\n")
|
||||
|
||||
// if we are running Docker on OSX we empty the internal service URLs
|
||||
if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker {
|
||||
if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker {
|
||||
serviceURLs = ""
|
||||
}
|
||||
|
||||
data = append(data, []string{serviceURL.Namespace, serviceURL.Name, "", serviceURLs})
|
||||
data = append(data, []string{serviceURL.Namespace, serviceURL.Name, servicePortNames, serviceURLs})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,10 +20,8 @@ import (
"path/filepath"

"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/out"
)

@ -33,10 +31,7 @@ var sshKeyCmd = &cobra.Command{
Short: "Retrieve the ssh identity key path of the specified cluster",
Long: "Retrieve the ssh identity key path of the specified cluster.",
Run: func(cmd *cobra.Command, args []string) {
cc, err := config.Load(viper.GetString(config.ProfileName))
if err != nil {
exit.WithError("Getting machine config failed", err)
}
_, cc := mustload.Partial(ClusterFlagValue())
out.Ln(filepath.Join(localpath.MiniPath(), "machines", cc.Name, "id_rsa"))
},
}
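The simplified ssh-key command just prints the identity file under the minikube home directory. A small sketch of the path construction, with a placeholder home directory standing in for localpath.MiniPath():

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	miniHome := "/home/user/.minikube" // placeholder for localpath.MiniPath()
	profile := "minikube"
	fmt.Println(filepath.Join(miniHome, "machines", profile, "id_rsa"))
}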
@ -21,12 +21,13 @@ import (
|
|||
|
||||
"github.com/docker/machine/libmachine/ssh"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
@ -40,34 +41,30 @@ var sshCmd = &cobra.Command{
|
|||
Short: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'",
|
||||
Long: "Log into or run a command on a machine with SSH; similar to 'docker-machine ssh'.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
// TODO: allow choice of node to ssh into
|
||||
cp, err := config.PrimaryControlPlane(cc)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary control plane", err)
|
||||
}
|
||||
host, err := machine.LoadHost(api, driver.MachineName(*cc, cp))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
if host.Driver.DriverName() == driver.None {
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
if co.CP.Host.DriverName == driver.None {
|
||||
exit.UsageT("'none' driver does not support 'minikube ssh' command")
|
||||
}
|
||||
|
||||
var err error
|
||||
var n *config.Node
|
||||
if nodeName == "" {
|
||||
n = co.CP.Node
|
||||
} else {
|
||||
n, _, err = node.Retrieve(co.Config, nodeName)
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName})
|
||||
}
|
||||
}
|
||||
|
||||
if nativeSSHClient {
|
||||
ssh.SetDefaultClient(ssh.Native)
|
||||
} else {
|
||||
ssh.SetDefaultClient(ssh.External)
|
||||
}
|
||||
|
||||
err = machine.CreateSSHShell(api, *cc, cp, args)
|
||||
err = machine.CreateSSHShell(co.API, *co.Config, *n, args)
|
||||
if err != nil {
|
||||
// This is typically due to a non-zero exit code, so no need for flourish.
|
||||
out.ErrLn("ssh: %v", err)
|
||||
|
@ -78,5 +75,6 @@ var sshCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func init() {
|
||||
sshCmd.Flags().BoolVar(&nativeSSHClient, nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
|
||||
sshCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
|
||||
sshCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to ssh into. Defaults to the primary control plane.")
|
||||
}
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"os/user"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/docker/machine/libmachine/ssh"
|
||||
|
@ -47,81 +46,24 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/download"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/node"
|
||||
"k8s.io/minikube/pkg/minikube/notify"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/proxy"
|
||||
"k8s.io/minikube/pkg/minikube/registry"
|
||||
"k8s.io/minikube/pkg/minikube/translate"
|
||||
"k8s.io/minikube/pkg/util"
|
||||
pkgutil "k8s.io/minikube/pkg/util"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
isoURL = "iso-url"
|
||||
memory = "memory"
|
||||
cpus = "cpus"
|
||||
humanReadableDiskSize = "disk-size"
|
||||
nfsSharesRoot = "nfs-shares-root"
|
||||
nfsShare = "nfs-share"
|
||||
kubernetesVersion = "kubernetes-version"
|
||||
hostOnlyCIDR = "host-only-cidr"
|
||||
containerRuntime = "container-runtime"
|
||||
criSocket = "cri-socket"
|
||||
networkPlugin = "network-plugin"
|
||||
enableDefaultCNI = "enable-default-cni"
|
||||
hypervVirtualSwitch = "hyperv-virtual-switch"
|
||||
hypervUseExternalSwitch = "hyperv-use-external-switch"
|
||||
hypervExternalAdapter = "hyperv-external-adapter"
|
||||
kvmNetwork = "kvm-network"
|
||||
kvmQemuURI = "kvm-qemu-uri"
|
||||
kvmGPU = "kvm-gpu"
|
||||
kvmHidden = "kvm-hidden"
|
||||
minikubeEnvPrefix = "MINIKUBE"
|
||||
installAddons = "install-addons"
|
||||
defaultDiskSize = "20000mb"
|
||||
keepContext = "keep-context"
|
||||
createMount = "mount"
|
||||
featureGates = "feature-gates"
|
||||
apiServerName = "apiserver-name"
|
||||
apiServerPort = "apiserver-port"
|
||||
dnsDomain = "dns-domain"
|
||||
serviceCIDR = "service-cluster-ip-range"
|
||||
imageRepository = "image-repository"
|
||||
imageMirrorCountry = "image-mirror-country"
|
||||
mountString = "mount-string"
|
||||
disableDriverMounts = "disable-driver-mounts"
|
||||
cacheImages = "cache-images"
|
||||
uuid = "uuid"
|
||||
vpnkitSock = "hyperkit-vpnkit-sock"
|
||||
vsockPorts = "hyperkit-vsock-ports"
|
||||
embedCerts = "embed-certs"
|
||||
noVTXCheck = "no-vtx-check"
|
||||
downloadOnly = "download-only"
|
||||
dnsProxy = "dns-proxy"
|
||||
hostDNSResolver = "host-dns-resolver"
|
||||
waitUntilHealthy = "wait"
|
||||
force = "force"
|
||||
dryRun = "dry-run"
|
||||
interactive = "interactive"
|
||||
waitTimeout = "wait-timeout"
|
||||
nativeSSH = "native-ssh"
|
||||
minUsableMem = 1024 // Kubernetes will not start with less than 1GB
|
||||
minRecommendedMem = 2000 // Warn at no lower than existing configurations
|
||||
minimumCPUS = 2
|
||||
minimumDiskSize = 2000
|
||||
autoUpdate = "auto-update-drivers"
|
||||
hostOnlyNicType = "host-only-nic-type"
|
||||
natNicType = "nat-nic-type"
|
||||
)
|
||||
|
||||
var (
|
||||
registryMirror []string
|
||||
insecureRegistry []string
|
||||
|
@ -139,100 +81,6 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
// initMinikubeFlags includes commandline flags for minikube.
|
||||
func initMinikubeFlags() {
|
||||
viper.SetEnvPrefix(minikubeEnvPrefix)
|
||||
// Replaces '-' in flags with '_' in env variables
|
||||
// e.g. iso-url => $ENVPREFIX_ISO_URL
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
startCmd.Flags().Bool(force, false, "Force minikube to perform possibly dangerous operations")
|
||||
startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information")
|
||||
startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state")
|
||||
|
||||
startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.")
|
||||
startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
|
||||
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
|
||||
startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.")
|
||||
startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
|
||||
startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
|
||||
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")
|
||||
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
|
||||
startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
|
||||
startCmd.Flags().StringArrayVar(&node.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
|
||||
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
|
||||
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
|
||||
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
|
||||
startCmd.Flags().Bool(waitUntilHealthy, true, "Block until the apiserver is servicing API requests")
|
||||
startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.")
|
||||
startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
|
||||
startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
|
||||
startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
|
||||
}
|
||||
|
||||
// initKubernetesFlags inits the commandline flags for kubernetes related options
|
||||
func initKubernetesFlags() {
|
||||
startCmd.Flags().String(kubernetesVersion, "", "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
|
||||
startCmd.Flags().Var(&node.ExtraOptions, "extra-config",
|
||||
`A set of key=value pairs that describe configuration that may be passed to different components.
|
||||
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
|
||||
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, kube-proxy, scheduler
|
||||
Valid kubeadm parameters: `+fmt.Sprintf("%s, %s", strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], ", "), strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], ",")))
|
||||
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
|
||||
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
|
||||
startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port")
|
||||
startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
}
|
||||
|
||||
// initDriverFlags inits the commandline flags for vm drivers
|
||||
func initDriverFlags() {
|
||||
startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers()))
|
||||
startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.")
|
||||
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors")
|
||||
|
||||
// kvm2
|
||||
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)")
|
||||
startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (kvm2 driver only)")
|
||||
startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube")
|
||||
startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)")
|
||||
|
||||
// virtualbox
|
||||
startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)")
|
||||
startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
|
||||
startCmd.Flags().String(natNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
|
||||
|
||||
// hyperkit
|
||||
startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)")
|
||||
startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (hyperkit driver only)")
|
||||
startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)")
|
||||
startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (hyperkit driver only)")
|
||||
startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)")
|
||||
|
||||
// hyperv
|
||||
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)")
|
||||
startCmd.Flags().Bool(hypervUseExternalSwitch, false, "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)")
|
||||
startCmd.Flags().String(hypervExternalAdapter, "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)")
|
||||
}
|
||||
|
||||
// initNetworkingFlags inits the commandline flags for connectivity related flags for start
|
||||
func initNetworkingFlags() {
|
||||
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
|
||||
startCmd.Flags().StringSliceVar(®istryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
|
||||
startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
|
||||
startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.")
|
||||
startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
|
||||
startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
|
||||
startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
|
||||
}
|
||||
|
||||
// startCmd represents the start command
|
||||
var startCmd = &cobra.Command{
|
||||
Use: "start",
|
||||
|
@ -276,6 +124,13 @@ func platform() string {
// runStart handles the flow of "minikube start"
func runStart(cmd *cobra.Command, args []string) {
	displayVersion(version.GetVersion())

	// No need to do the update check if no one is going to see it
	if !viper.GetBool(interactive) || !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}

	displayEnviron(os.Environ())

	// if --registry-mirror is specified when running minikube start,
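// Illustrative sketch (not part of the diff): the update-notification pattern used in
// runStart above -- optional HTTP work is kept off the critical path by running it in a
// goroutine, and it is skipped entirely when nobody will see the result. The guard in
// runStart is written differently; the helper and names below (maybeCheckForUpdate,
// interactiveFlag, dryRunFlag) are hypothetical and only express the comment's intent.
package main

import (
	"fmt"
	"time"
)

func maybeCheckForUpdate(interactiveFlag, dryRunFlag bool, check func()) {
	// No point fetching release notes that will never be shown.
	if !interactiveFlag || dryRunFlag {
		return
	}
	go check() // fire-and-forget; any error is handled (or logged) by the callee
}

func main() {
	maybeCheckForUpdate(true, false, func() {
		time.Sleep(50 * time.Millisecond) // simulate a slow, optional HTTP fetch
		fmt.Println("update check finished")
	})
	fmt.Println("startup continues immediately")
	time.Sleep(100 * time.Millisecond) // only so the demo goroutine can finish
}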
@ -289,16 +144,65 @@ func runStart(cmd *cobra.Command, args []string) {
		registryMirror = viper.GetStringSlice("registry_mirror")
	}

	existing, err := config.Load(viper.GetString(config.ProfileName))
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
	}

	ds := selectDriver(existing)
	validateSpecifiedDriver(existing)
	ds, alts, specified := selectDriver(existing)
	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exit.WithError("error provisioning host", err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list
				profile, err := config.LoadProfile(ClusterFlagValue())
				if err != nil {
					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}

				err = deleteProfile(profile)
				if err != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err != nil {
					continue
				} else {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exit.WithError("error provisioning host", err)
			}
		}
	}

	kubeconfig, err := startWithDriver(starter, existing)
	if err != nil {
		exit.WithError("failed to start node", err)
	}

	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		glog.Errorf("kubectl info: %v", err)
	}

}

func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
	driverName := ds.Name
	glog.Infof("selected driver: %s", driverName)
	validateDriver(ds, existing)
	err = autoSetDriverOptions(cmd, driverName)
	err := autoSetDriverOptions(cmd, driverName)
	if err != nil {
		glog.Errorf("Error autoSetOptions : %v", err)
	}
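// Illustrative sketch (not from the minikube source): the fallback strategy that the new
// runStart code above implements, reduced to its core shape. A preferred driver is tried
// first; if it fails and the user did not pin a specific driver, the remaining alternates
// are tried in order until one succeeds. All names below are hypothetical stand-ins.
package main

import (
	"errors"
	"fmt"
)

type provisioner func(name string) error

func provisionWithFallback(primary string, alts []string, pinned bool, provision provisioner) (string, error) {
	err := provision(primary)
	if err == nil {
		return primary, nil
	}
	if pinned {
		// The user explicitly asked for this driver: fail rather than surprise them.
		return "", fmt.Errorf("provisioning %s: %w", primary, err)
	}
	for _, alt := range alts {
		fmt.Printf("startup with %q failed, retrying with %q: %v\n", primary, alt, err)
		if err = provision(alt); err == nil {
			return alt, nil
		}
	}
	return "", fmt.Errorf("all drivers failed, last error: %w", err)
}

func main() {
	attempts := map[string]error{"docker": errors.New("docker daemon unreachable"), "virtualbox": nil}
	chosen, err := provisionWithFallback("docker", []string{"virtualbox", "none"}, false, func(name string) error {
		return attempts[name]
	})
	fmt.Println(chosen, err) // virtualbox <nil>
}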
@ -312,23 +216,23 @@ func runStart(cmd *cobra.Command, args []string) {
	}

	k8sVersion := getKubernetesVersion(existing)
	mc, n, err := generateCfgFromFlags(cmd, k8sVersion, driverName)
	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
	if err != nil {
		exit.WithError("Failed to generate config", err)
		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
	}

	// This is about as far as we can go without overwriting config files
	if viper.GetBool(dryRun) {
		out.T(out.DryRun, `dry-run validation complete!`)
		return
		os.Exit(0)
	}

	if !driver.BareMetal(driverName) && !driver.IsKIC(driverName) {
	if driver.IsVM(driverName) {
		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
		if err != nil {
			exit.WithError("Failed to cache ISO", err)
			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
		}
		mc.MinikubeISO = url
		cc.MinikubeISO = url
	}

	if viper.GetBool(nativeSSH) {
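// Illustrative sketch (not from the minikube source): the --dry-run contract shown above,
// where configuration is generated and validated and the process exits before any
// state-mutating step (ISO download, VM creation) is reached. Types and names here are
// simplified, hypothetical stand-ins.
package main

import (
	"fmt"
	"os"
)

type clusterConfig struct {
	CPUs     int
	MemoryMB int
}

func validate(cc clusterConfig) error {
	if cc.CPUs < 2 {
		return fmt.Errorf("need at least 2 CPUs, got %d", cc.CPUs)
	}
	return nil
}

func start(cc clusterConfig, dryRun bool) {
	if err := validate(cc); err != nil {
		fmt.Fprintln(os.Stderr, "invalid configuration:", err)
		os.Exit(1)
	}
	// This is about as far as we can go without overwriting config files.
	if dryRun {
		fmt.Println("dry-run validation complete!")
		os.Exit(0)
	}
	fmt.Println("provisioning would start here")
}

func main() {
	start(clusterConfig{CPUs: 2, MemoryMB: 4096}, true)
}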
@ -337,14 +241,65 @@ func runStart(cmd *cobra.Command, args []string) {
		ssh.SetDefaultClient(ssh.External)
	}

	kubeconfig, err := startNode(existing, mc, n)
	if err != nil {
		exit.WithError("Starting node", err)
	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}

	if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
		glog.Errorf("kubectl info: %v", err)
	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true)
	if err != nil {
		return node.Starter{}, err
	}

	return node.Starter{
		Runner: mRunner,
		PreExists: preExists,
		MachineAPI: mAPI,
		Host: host,
		ExistingAddons: existingAddons,
		Cfg: &cc,
		Node: &n,
	}, nil
}

func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
	kubeconfig, err := node.Start(starter, true)
	if err != nil {
		kubeconfig, err = maybeDeleteAndRetry(*starter.Cfg, *starter.Node, starter.ExistingAddons, err)
		if err != nil {
			return nil, err
		}
	}

	numNodes := viper.GetInt(nodes)
	if numNodes == 1 && existing != nil {
		numNodes = len(existing.Nodes)
	}
	if numNodes > 1 {
		if driver.BareMetal(starter.Cfg.Driver) {
			exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.")
		} else {
			for i := 1; i < numNodes; i++ {
				nodeName := node.Name(i + 1)
				n := config.Node{
					Name: nodeName,
					Worker: true,
					ControlPlane: false,
					KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
				}
				out.Ln("") // extra newline for clarity on the command line
				err := node.Add(starter.Cfg, n)
				if err != nil {
					return nil, errors.Wrap(err, "adding node")
				}
			}
		}
	}

	return kubeconfig, nil
}

func updateDriver(driverName string) {
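// Illustrative sketch (not from the minikube source): how startWithDriver above grows a
// cluster to --nodes=N by appending worker entries after the control plane. The naming
// convention ("m02", "m03", ...) mirrors what node.Name(i + 1) appears to produce, but
// that is an assumption; the types below are simplified stand-ins, not minikube's.
package main

import "fmt"

type nodeSpec struct {
	Name         string
	ControlPlane bool
	Worker       bool
}

func nodeName(index int) string { return fmt.Sprintf("m%02d", index) }

// expandNodes appends worker specs until the cluster has the requested node count.
func expandNodes(existing []nodeSpec, want int) []nodeSpec {
	for i := len(existing); i < want; i++ {
		existing = append(existing, nodeSpec{
			Name:         nodeName(i + 1),
			ControlPlane: false,
			Worker:       true,
		})
	}
	return existing
}

func main() {
	nodes := []nodeSpec{{Name: nodeName(1), ControlPlane: true, Worker: true}}
	for _, n := range expandNodes(nodes, 3) {
		fmt.Printf("%+v\n", n) // m01 (control plane), then workers m02 and m03
	}
}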
@ -358,16 +313,11 @@ func updateDriver(driverName string) {

func displayVersion(version string) {
	prefix := ""
	if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", viper.GetString(config.ProfileName))
	if ClusterFlagValue() != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", ClusterFlagValue())
	}

	versionState := out.Happy
	if notify.MaybePrintUpdateTextFromGithub() {
		versionState = out.Meh
	}

	out.T(versionState, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
	out.T(out.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
}

// displayEnviron makes the user aware of environment variables that will affect how minikube operates
|
@ -382,17 +332,6 @@ func displayEnviron(env []string) {
	}
}

func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) {
	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}
	return node.Start(mc, n, true, existingAddons)
}

func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
	if kcs.KeepContext {
		out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
@ -402,26 +341,16 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st

	path, err := exec.LookPath("kubectl")
	if err != nil {
		out.T(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
		out.ErrT(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
		return nil
	}

	j, err := exec.Command(path, "version", "--client", "--output=json").Output()
	gitVersion, err := kubectlVersion(path)
	if err != nil {
		return errors.Wrap(err, "exec")
		return err
	}

	cv := struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}{}
	err = json.Unmarshal(j, &cv)
	if err != nil {
		return errors.Wrap(err, "unmarshal")
	}

	client, err := semver.Make(strings.TrimPrefix(cv.ClientVersion.GitVersion, version.VersionPrefix))
	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
	if err != nil {
		return errors.Wrap(err, "client semver")
	}
@ -432,42 +361,136 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st

	if client.Major != cluster.Major || minorSkew > 1 {
		out.Ln("")
		out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
		out.WarningT("{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
			out.V{"path": path, "client_version": client, "cluster_version": cluster})
		out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
		out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
			out.V{"path": path, "client_version": client})
	}
	return nil
}
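// Illustrative sketch (not from the minikube source): the client/cluster skew check in
// showKubectlInfo above, assuming minorSkew (defined in a part of the diff not shown
// here) is the absolute difference between the two minor versions. It uses the same
// github.com/blang/semver package this file already imports.
package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver"
)

func minorSkew(clientStr, clusterStr string) (uint64, error) {
	client, err := semver.Make(strings.TrimPrefix(clientStr, "v"))
	if err != nil {
		return 0, err
	}
	cluster, err := semver.Make(strings.TrimPrefix(clusterStr, "v"))
	if err != nil {
		return 0, err
	}
	if client.Minor > cluster.Minor {
		return client.Minor - cluster.Minor, nil
	}
	return cluster.Minor - client.Minor, nil
}

func main() {
	skew, err := minorSkew("v1.15.2", "v1.18.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(skew > 1) // true: kubectl v1.15 may be incompatible with Kubernetes v1.18
}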

func selectDriver(existing *config.ClusterConfig) registry.DriverState {
func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
	if viper.GetBool(deleteOnFailure) {
		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
		// Start failed, delete the cluster and try again
		profile, err := config.LoadProfile(cc.Name)
		if err != nil {
			out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name})
		}

		err = deleteProfile(profile)
		if err != nil {
			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name})
		}

		var kubeconfig *kubeconfig.Settings
		for _, n := range cc.Nodes {
			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane)
			s := node.Starter{
				Runner: r,
				PreExists: p,
				MachineAPI: m,
				Host: h,
				Cfg: &cc,
				Node: &n,
				ExistingAddons: existingAddons,
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}

			k, err := node.Start(s, n.ControlPlane)
			if n.ControlPlane {
				kubeconfig = k
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
		}
		return kubeconfig, nil
	}
	// Don't delete the cluster unless they ask
	return nil, errors.Wrap(originalErr, "startup failed")
}

func kubectlVersion(path string) (string, error) {
	j, err := exec.Command(path, "version", "--client", "--output=json").Output()
	if err != nil {
		// really old kubernetes clients did not have the --output parameter
		b, err := exec.Command(path, "version", "--client", "--short").Output()
		if err != nil {
			return "", errors.Wrap(err, "exec")
		}
		s := strings.TrimSpace(string(b))
		return strings.Replace(s, "Client Version: ", "", 1), nil
	}

	cv := struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}{}
	err = json.Unmarshal(j, &cv)
	if err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}

	return cv.ClientVersion.GitVersion, nil
}
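// Illustrative sketch (not from the minikube source): what kubectlVersion above extracts
// from each of the two output formats it handles. The JSON sample is abbreviated; a real
// `kubectl version --client --output=json` payload carries more fields.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Modern clients: JSON output, gitVersion nested under clientVersion.
	j := []byte(`{"clientVersion":{"gitVersion":"v1.18.0"}}`)
	cv := struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}{}
	if err := json.Unmarshal(j, &cv); err != nil {
		panic(err)
	}
	fmt.Println(cv.ClientVersion.GitVersion) // v1.18.0

	// Very old clients: only `--short` is available, e.g. "Client Version: v1.11.10".
	short := "Client Version: v1.11.10\n"
	fmt.Println(strings.Replace(strings.TrimSpace(short), "Client Version: ", "", 1)) // v1.11.10
}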

func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))

	// By default, the driver is whatever we used last time
	if existing != nil && existing.Driver != "" {
		ds := driver.Status(existing.Driver)
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
		return ds
		return ds, nil, true
	}

	// Default to looking at the new driver parameter
	if viper.GetString("driver") != "" {
		ds := driver.Status(viper.GetString("driver"))
	if d := viper.GetString("driver"); d != "" {
		if vmd := viper.GetString("vm-driver"); vmd != "" {
			// Output a warning
			warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set.

Since vm-driver is deprecated, minikube will default to driver={{.driver}}.

If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
`
			out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
		}
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds
		return ds, nil, true
	}

	// Fallback to old driver parameter
	if viper.GetString("vm-driver") != "" {
	if d := viper.GetString("vm-driver"); d != "" {
		ds := driver.Status(viper.GetString("vm-driver"))
		if ds.Name == "" {
			exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(out.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds
		return ds, nil, true
	}

	pick, alts := driver.Suggest(driver.Choices())
	choices := driver.Choices(viper.GetBool("vm"))
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
		out.T(out.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			out.T(out.Option, "{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
		}
		out.T(out.Workaround, "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/")
		os.Exit(exit.Unavailable)
	}

	if len(alts) > 1 {
@ -479,7 +502,72 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
	} else {
		out.T(out.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
	}
	return pick
	return pick, alts, false
}

// hostDriver returns the actual driver used by a libmachine host, which can differ from our config
func hostDriver(existing *config.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		glog.Warningf("selectDriver NewAPIClient: %v", err)
		return existing.Driver
	}

	cp, err := config.PrimaryControlPlane(existing)
	if err != nil {
		glog.Warningf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		glog.Warningf("selectDriver api.Load: %v", err)
		return existing.Driver
	}

	return h.Driver.DriverName()
}

// validateSpecifiedDriver makes sure that if a user has passed in a driver
// it matches the existing cluster if there is one
func validateSpecifiedDriver(existing *config.ClusterConfig) {
	if existing == nil {
		return
	}

	var requested string
	if d := viper.GetString("driver"); d != "" {
		requested = d
	} else if d := viper.GetString("vm-driver"); d != "" {
		requested = d
	}

	// Neither --vm-driver or --driver was specified
	if requested == "" {
		return
	}

	old := hostDriver(existing)
	if requested == old {
		return
	}

	out.ErrT(out.Conflict, `The existing "{{.name}}" VM was created using the "{{.old}}" driver, and is incompatible with the "{{.new}}" driver.`,
		out.V{"name": existing.Name, "new": requested, "old": old})

	out.ErrT(out.Workaround, `To proceed, either:

1) Delete the existing "{{.name}}" cluster using: '{{.delcommand}}'

* or *

2) Start the existing "{{.name}}" cluster using: '{{.command}} --driver={{.old}}'
`, out.V{"command": mustload.ExampleCmd(existing.Name, "start"), "delcommand": mustload.ExampleCmd(existing.Name, "delete"), "old": old, "name": existing.Name})

	exit.WithCodeT(exit.Config, "Exiting.")
}

// validateDriver validates that the selected driver appears sane, exits if not
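// Illustrative sketch (not the actual mustload.ExampleCmd implementation, whose exact
// output format is not shown in this diff): a helper in the same spirit as the one used
// in the workaround message above, composing a copy-pasteable minikube command that adds
// "-p <profile>" only for non-default profiles. All names here are hypothetical.
package main

import "fmt"

const defaultClusterName = "minikube" // stand-in for constants.DefaultClusterName

func exampleCmd(profile, action string) string {
	if profile == defaultClusterName {
		return fmt.Sprintf("minikube %s", action)
	}
	return fmt.Sprintf("minikube %s -p %s", action, profile)
}

func main() {
	fmt.Println(exampleCmd("minikube", "delete"))     // minikube delete
	fmt.Println(exampleCmd("hyperkit-test", "start")) // minikube start -p hyperkit-test
}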
|
@ -487,7 +575,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
|
|||
name := ds.Name
|
||||
glog.Infof("validating driver %q against %+v", name, existing)
|
||||
if !driver.Supported(name) {
|
||||
exit.WithCodeT(exit.Unavailable, "The driver {{.experimental}} '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
|
||||
exit.WithCodeT(exit.Unavailable, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
|
||||
}
|
||||
|
||||
st := ds.State
|
||||
|
@ -504,52 +592,18 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
|
|||
out.ErrLn("")
|
||||
|
||||
if !st.Installed && !viper.GetBool(force) {
|
||||
if existing != nil && name == existing.Driver {
|
||||
exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name})
|
||||
if existing != nil {
|
||||
if old := hostDriver(existing); name == old {
|
||||
exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name})
|
||||
}
|
||||
}
|
||||
exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name})
|
||||
}
|
||||
|
||||
if !viper.GetBool(force) {
|
||||
exit.WithCodeT(exit.Unavailable, "Failed to validate '{{.driver}}' driver", out.V{"driver": name})
|
||||
}
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return
|
||||
}
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
glog.Warningf("selectDriver NewAPIClient: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(existing)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting primary cp", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*existing, cp)
|
||||
h, err := api.Load(machineName)
|
||||
if err != nil {
|
||||
glog.Warningf("selectDriver api.Load: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if h.Driver.DriverName() == name {
|
||||
return
|
||||
}
|
||||
|
||||
out.ErrT(out.Conflict, `The existing "{{.profile_name}}" VM that was created using the "{{.old_driver}}" driver, and is incompatible with the "{{.driver}}" driver.`,
|
||||
out.V{"profile_name": machineName, "driver": name, "old_driver": h.Driver.DriverName()})
|
||||
|
||||
out.ErrT(out.Workaround, `To proceed, either:
|
||||
|
||||
1) Delete the existing "{{.profile_name}}" cluster using: '{{.command}} delete'
|
||||
|
||||
* or *
|
||||
|
||||
2) Start the existing "{{.profile_name}}" cluster using: '{{.command}} start --driver={{.old_driver}}'
|
||||
`, out.V{"command": minikubeCmd(), "old_driver": h.Driver.DriverName(), "profile_name": machineName})
|
||||
|
||||
exit.WithCodeT(exit.Config, "Exiting.")
|
||||
}
|
||||
|
||||
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
|
||||
|
@ -603,14 +657,6 @@ func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string
|
|||
return false, fallback, nil
|
||||
}
|
||||
|
||||
// Return a minikube command containing the current profile name
|
||||
func minikubeCmd() string {
|
||||
if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
|
||||
return fmt.Sprintf("minikube -p %s", config.ProfileName)
|
||||
}
|
||||
return "minikube"
|
||||
}
|
||||
|
||||
// validateUser validates minikube is run by the recommended user (privileged or regular)
|
||||
func validateUser(drvName string) {
|
||||
u, err := user.Current()
|
||||
|
@ -622,23 +668,24 @@ func validateUser(drvName string) {
|
|||
useForce := viper.GetBool(force)
|
||||
|
||||
if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce {
|
||||
exit.WithCodeT(exit.Permissions, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo minikube --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
|
||||
exit.WithCodeT(exit.Permissions, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
|
||||
}
|
||||
|
||||
if driver.NeedsRoot(drvName) || u.Uid != "0" {
|
||||
return
|
||||
}
|
||||
|
||||
out.T(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
|
||||
out.T(out.Tip, "If you are running minikube within a VM, consider using --driver=none:")
|
||||
out.T(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
|
||||
out.ErrT(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
|
||||
out.ErrT(out.Tip, "If you are running minikube within a VM, consider using --driver=none:")
|
||||
out.ErrT(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
|
||||
|
||||
if !useForce {
|
||||
os.Exit(exit.Permissions)
|
||||
}
|
||||
_, err = config.Load(viper.GetString(config.ProfileName))
|
||||
cname := ClusterFlagValue()
|
||||
_, err = config.Load(cname)
|
||||
if err == nil || !config.IsNotExist(err) {
|
||||
out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}} delete", out.V{"cmd": minikubeCmd()})
|
||||
out.ErrT(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
|
||||
}
|
||||
if !useForce {
|
||||
exit.WithCodeT(exit.Permissions, "Exiting")
|
||||
|
@ -666,6 +713,9 @@ func memoryLimits(drvName string) (int, int, error) {
|
|||
|
||||
// suggestMemoryAllocation calculates the default memory footprint in MB
|
||||
func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
|
||||
if mem := viper.GetInt(memory); mem != 0 {
|
||||
return mem
|
||||
}
|
||||
fallback := 2200
|
||||
maximum := 6000
|
||||
|
||||
|
@ -758,7 +808,7 @@ func validateFlags(cmd *cobra.Command, drvName string) {
|
|||
}
|
||||
|
||||
if driver.BareMetal(drvName) {
|
||||
if viper.GetString(config.ProfileName) != constants.DefaultClusterName {
|
||||
if ClusterFlagValue() != constants.DefaultClusterName {
|
||||
exit.WithCodeT(exit.Config, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
|
||||
}
|
||||
|
||||
|
@ -766,10 +816,18 @@ func validateFlags(cmd *cobra.Command, drvName string) {
|
|||
if runtime != "docker" {
|
||||
out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
|
||||
}
|
||||
|
||||
// conntrack is required starting with kubernetes 1.18, include the release candidates for completion
|
||||
version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
|
||||
if version.GTE(semver.MustParse("1.18.0-beta.1")) {
|
||||
if _, err := exec.LookPath("conntrack"); err != nil {
|
||||
exit.WithCodeT(exit.Config, "Sorry, Kubernetes v{{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check that kubeadm extra args contain only whitelisted parameters
|
||||
for param := range node.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
|
||||
for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
|
||||
if !config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], param) &&
|
||||
!config.ContainsParam(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], param) {
|
||||
exit.UsageT("Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
|
||||
|
@ -797,145 +855,17 @@ func validateRegistryMirror() {
|
|||
}
|
||||
}
|
||||
|
||||
// generateCfgFromFlags generates config.Config based on flags and supplied arguments
|
||||
func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
|
||||
r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
|
||||
if err != nil {
|
||||
return config.ClusterConfig{}, config.Node{}, err
|
||||
}
|
||||
|
||||
// Pick good default values for --network-plugin and --enable-default-cni based on runtime.
|
||||
selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI)
|
||||
selectedNetworkPlugin := viper.GetString(networkPlugin)
|
||||
if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) {
|
||||
selectedNetworkPlugin = "cni"
|
||||
if !cmd.Flags().Changed(enableDefaultCNI) {
|
||||
selectedEnableDefaultCNI = true
|
||||
}
|
||||
}
|
||||
|
||||
// Feed Docker our host proxy environment by default, so that it can pull images
|
||||
if _, ok := r.(*cruntime.Docker); ok && !cmd.Flags().Changed("docker-env") {
|
||||
setDockerProxy()
|
||||
}
|
||||
|
||||
repository := viper.GetString(imageRepository)
|
||||
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
|
||||
if strings.ToLower(repository) == "auto" || mirrorCountry != "" {
|
||||
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(k8sVersion))
|
||||
if err != nil {
|
||||
exit.WithError("Failed to check main repository and mirrors for images for images", err)
|
||||
}
|
||||
|
||||
if !found {
|
||||
if autoSelectedRepository == "" {
|
||||
exit.WithCodeT(exit.Failure, "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag")
|
||||
} else {
|
||||
out.WarningT("None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository})
|
||||
}
|
||||
}
|
||||
|
||||
repository = autoSelectedRepository
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(imageRepository) {
|
||||
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
|
||||
}
|
||||
|
||||
var kubeNodeName string
|
||||
if drvName != driver.None {
|
||||
kubeNodeName = "m01"
|
||||
}
|
||||
|
||||
return createNode(cmd, k8sVersion, kubeNodeName, drvName,
|
||||
repository, selectedEnableDefaultCNI, selectedNetworkPlugin)
|
||||
}
|
||||
|
||||
func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repository string,
|
||||
selectedEnableDefaultCNI bool, selectedNetworkPlugin string) (config.ClusterConfig, config.Node, error) {
|
||||
|
||||
sysLimit, containerLimit, err := memoryLimits(drvName)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to query memory limits: %v", err)
|
||||
}
|
||||
|
||||
mem := suggestMemoryAllocation(sysLimit, containerLimit)
|
||||
if cmd.Flags().Changed(memory) {
|
||||
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
|
||||
}
|
||||
|
||||
} else {
|
||||
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
|
||||
}
|
||||
|
||||
func createNode(cc config.ClusterConfig, kubeNodeName string) (config.ClusterConfig, config.Node, error) {
|
||||
// Create the initial node, which will necessarily be a control plane
|
||||
cp := config.Node{
|
||||
Port: viper.GetInt(apiServerPort),
|
||||
KubernetesVersion: k8sVersion,
|
||||
Port: cc.KubernetesConfig.NodePort,
|
||||
KubernetesVersion: getKubernetesVersion(&cc),
|
||||
Name: kubeNodeName,
|
||||
ControlPlane: true,
|
||||
Worker: true,
|
||||
}
|
||||
|
||||
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
|
||||
}
|
||||
|
||||
cfg := config.ClusterConfig{
|
||||
Name: viper.GetString(config.ProfileName),
|
||||
KeepContext: viper.GetBool(keepContext),
|
||||
EmbedCerts: viper.GetBool(embedCerts),
|
||||
MinikubeISO: viper.GetString(isoURL),
|
||||
Memory: mem,
|
||||
CPUs: viper.GetInt(cpus),
|
||||
DiskSize: diskSize,
|
||||
Driver: drvName,
|
||||
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
|
||||
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
|
||||
NFSShare: viper.GetStringSlice(nfsShare),
|
||||
NFSSharesRoot: viper.GetString(nfsSharesRoot),
|
||||
DockerEnv: node.DockerEnv,
|
||||
DockerOpt: node.DockerOpt,
|
||||
InsecureRegistry: insecureRegistry,
|
||||
RegistryMirror: registryMirror,
|
||||
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
|
||||
HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch),
|
||||
HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch),
|
||||
HypervExternalAdapter: viper.GetString(hypervExternalAdapter),
|
||||
KVMNetwork: viper.GetString(kvmNetwork),
|
||||
KVMQemuURI: viper.GetString(kvmQemuURI),
|
||||
KVMGPU: viper.GetBool(kvmGPU),
|
||||
KVMHidden: viper.GetBool(kvmHidden),
|
||||
DisableDriverMounts: viper.GetBool(disableDriverMounts),
|
||||
UUID: viper.GetString(uuid),
|
||||
NoVTXCheck: viper.GetBool(noVTXCheck),
|
||||
DNSProxy: viper.GetBool(dnsProxy),
|
||||
HostDNSResolver: viper.GetBool(hostDNSResolver),
|
||||
HostOnlyNicType: viper.GetString(hostOnlyNicType),
|
||||
NatNicType: viper.GetString(natNicType),
|
||||
KubernetesConfig: config.KubernetesConfig{
|
||||
KubernetesVersion: k8sVersion,
|
||||
ClusterName: viper.GetString(config.ProfileName),
|
||||
APIServerName: viper.GetString(apiServerName),
|
||||
APIServerNames: apiServerNames,
|
||||
APIServerIPs: apiServerIPs,
|
||||
DNSDomain: viper.GetString(dnsDomain),
|
||||
FeatureGates: viper.GetString(featureGates),
|
||||
ContainerRuntime: viper.GetString(containerRuntime),
|
||||
CRISocket: viper.GetString(criSocket),
|
||||
NetworkPlugin: selectedNetworkPlugin,
|
||||
ServiceCIDR: viper.GetString(serviceCIDR),
|
||||
ImageRepository: repository,
|
||||
ExtraOptions: node.ExtraOptions,
|
||||
ShouldLoadCachedImages: viper.GetBool(cacheImages),
|
||||
EnableDefaultCNI: selectedEnableDefaultCNI,
|
||||
},
|
||||
Nodes: []config.Node{cp},
|
||||
}
|
||||
return cfg, cp, nil
|
||||
cc.Nodes = []config.Node{cp}
|
||||
return cc, cp, nil
|
||||
}
|
||||
|
||||
// setDockerProxy sets the proxy environment variables in the docker environment.
|
||||
|
@ -951,7 +881,7 @@ func setDockerProxy() {
|
|||
continue
|
||||
}
|
||||
}
|
||||
node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v))
|
||||
config.DockerEnv = append(config.DockerEnv, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -960,10 +890,10 @@ func setDockerProxy() {
|
|||
func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
|
||||
err = nil
|
||||
hints := driver.FlagDefaults(drvName)
|
||||
if !cmd.Flags().Changed("extra-config") && len(hints.ExtraOptions) > 0 {
|
||||
if len(hints.ExtraOptions) > 0 {
|
||||
for _, eo := range hints.ExtraOptions {
|
||||
glog.Infof("auto setting extra-config to %q.", eo)
|
||||
err = node.ExtraOptions.Set(eo)
|
||||
err = config.ExtraOptions.Set(eo)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "setting extra option %s", eo)
|
||||
}
|
||||
|
@ -992,13 +922,15 @@ func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
func getKubernetesVersion(old *config.ClusterConfig) string {
	paramVersion := viper.GetString(kubernetesVersion)

	if paramVersion == "" { // if the user did not specify any version then ...
		if old != nil { // .. use the old version from config (if any)
			paramVersion = old.KubernetesConfig.KubernetesVersion
		}
		if paramVersion == "" { // .. otherwise use the default version
			paramVersion = constants.DefaultKubernetesVersion
		}
	// try to load the old version first if the user didn't specify anything
	if paramVersion == "" && old != nil {
		paramVersion = old.KubernetesConfig.KubernetesVersion
	}

	if paramVersion == "" || strings.EqualFold(paramVersion, "stable") {
		paramVersion = constants.DefaultKubernetesVersion
	} else if strings.EqualFold(paramVersion, "latest") {
		paramVersion = constants.NewestKubernetesVersion
	}

	nvs, err := semver.Make(strings.TrimPrefix(paramVersion, version.VersionPrefix))
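// Illustrative sketch (not from the minikube source): how the version resolution in the
// new getKubernetesVersion above behaves for typical inputs, with the constants stubbed
// out. The real values of constants.DefaultKubernetesVersion and
// constants.NewestKubernetesVersion depend on the minikube release.
package main

import (
	"fmt"
	"strings"
)

const (
	defaultKubernetesVersion = "v1.18.0" // stand-in for constants.DefaultKubernetesVersion
	newestKubernetesVersion  = "v1.18.1" // stand-in for constants.NewestKubernetesVersion
)

func resolveVersion(flagValue, existingVersion string) string {
	v := flagValue
	// Prefer the version already recorded in the profile when no flag was given.
	if v == "" && existingVersion != "" {
		v = existingVersion
	}
	switch {
	case v == "" || strings.EqualFold(v, "stable"):
		v = defaultKubernetesVersion
	case strings.EqualFold(v, "latest"):
		v = newestKubernetesVersion
	}
	return v
}

func main() {
	fmt.Println(resolveVersion("", ""))        // v1.18.0 (default)
	fmt.Println(resolveVersion("", "v1.17.3")) // v1.17.3 (kept from the existing profile)
	fmt.Println(resolveVersion("stable", ""))  // v1.18.0
	fmt.Println(resolveVersion("latest", ""))  // v1.18.1
	fmt.Println(resolveVersion("v1.16.8", "")) // v1.16.8 (explicit version wins)
}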
@ -0,0 +1,587 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/download"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
pkgutil "k8s.io/minikube/pkg/util"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
isoURL = "iso-url"
|
||||
memory = "memory"
|
||||
cpus = "cpus"
|
||||
humanReadableDiskSize = "disk-size"
|
||||
nfsSharesRoot = "nfs-shares-root"
|
||||
nfsShare = "nfs-share"
|
||||
kubernetesVersion = "kubernetes-version"
|
||||
hostOnlyCIDR = "host-only-cidr"
|
||||
containerRuntime = "container-runtime"
|
||||
criSocket = "cri-socket"
|
||||
networkPlugin = "network-plugin"
|
||||
enableDefaultCNI = "enable-default-cni"
|
||||
hypervVirtualSwitch = "hyperv-virtual-switch"
|
||||
hypervUseExternalSwitch = "hyperv-use-external-switch"
|
||||
hypervExternalAdapter = "hyperv-external-adapter"
|
||||
kvmNetwork = "kvm-network"
|
||||
kvmQemuURI = "kvm-qemu-uri"
|
||||
kvmGPU = "kvm-gpu"
|
||||
kvmHidden = "kvm-hidden"
|
||||
minikubeEnvPrefix = "MINIKUBE"
|
||||
installAddons = "install-addons"
|
||||
defaultDiskSize = "20000mb"
|
||||
keepContext = "keep-context"
|
||||
createMount = "mount"
|
||||
featureGates = "feature-gates"
|
||||
apiServerName = "apiserver-name"
|
||||
apiServerPort = "apiserver-port"
|
||||
dnsDomain = "dns-domain"
|
||||
serviceCIDR = "service-cluster-ip-range"
|
||||
imageRepository = "image-repository"
|
||||
imageMirrorCountry = "image-mirror-country"
|
||||
mountString = "mount-string"
|
||||
disableDriverMounts = "disable-driver-mounts"
|
||||
cacheImages = "cache-images"
|
||||
uuid = "uuid"
|
||||
vpnkitSock = "hyperkit-vpnkit-sock"
|
||||
vsockPorts = "hyperkit-vsock-ports"
|
||||
embedCerts = "embed-certs"
|
||||
noVTXCheck = "no-vtx-check"
|
||||
downloadOnly = "download-only"
|
||||
dnsProxy = "dns-proxy"
|
||||
hostDNSResolver = "host-dns-resolver"
|
||||
waitComponents = "wait"
|
||||
force = "force"
|
||||
dryRun = "dry-run"
|
||||
interactive = "interactive"
|
||||
waitTimeout = "wait-timeout"
|
||||
nativeSSH = "native-ssh"
|
||||
minUsableMem = 1024 // Kubernetes will not start with less than 1GB
|
||||
minRecommendedMem = 2000 // Warn at no lower than existing configurations
|
||||
minimumCPUS = 2
|
||||
minimumDiskSize = 2000
|
||||
autoUpdate = "auto-update-drivers"
|
||||
hostOnlyNicType = "host-only-nic-type"
|
||||
natNicType = "nat-nic-type"
|
||||
nodes = "nodes"
|
||||
preload = "preload"
|
||||
deleteOnFailure = "delete-on-failure"
|
||||
)
|
||||
|
||||
// initMinikubeFlags includes commandline flags for minikube.
|
||||
func initMinikubeFlags() {
|
||||
viper.SetEnvPrefix(minikubeEnvPrefix)
|
||||
// Replaces '-' in flags with '_' in env variables
|
||||
// e.g. iso-url => $ENVPREFIX_ISO_URL
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
startCmd.Flags().Bool(force, false, "Force minikube to perform possibly dangerous operations")
|
||||
startCmd.Flags().Bool(interactive, true, "Allow user prompts for more information")
|
||||
startCmd.Flags().Bool(dryRun, false, "dry-run mode. Validates configuration, but does not mutate system state")
|
||||
|
||||
startCmd.Flags().Int(cpus, 2, "Number of CPUs allocated to Kubernetes.")
|
||||
startCmd.Flags().String(memory, "", "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g).")
|
||||
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
|
||||
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.")
|
||||
startCmd.Flags().StringSlice(isoURL, download.DefaultISOURLs(), "Locations to fetch the minikube ISO from.")
|
||||
startCmd.Flags().Bool(keepContext, false, "This will keep the existing kubectl context and will create a minikube context.")
|
||||
startCmd.Flags().Bool(embedCerts, false, "if true, will embed the certs in kubeconfig.")
|
||||
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd).")
|
||||
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube.")
|
||||
startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
|
||||
startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
|
||||
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
|
||||
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
|
||||
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
|
||||
startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ",")))
|
||||
startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.")
|
||||
startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
|
||||
startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
|
||||
startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
|
||||
startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
|
||||
startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
|
||||
startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
|
||||
}
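// Illustrative sketch (not from the minikube source): how the viper wiring in
// initMinikubeFlags above lets environment variables stand in for flags. With the
// MINIKUBE prefix and the '-' -> '_' replacer, a key like "iso-url" resolves from
// $MINIKUBE_ISO_URL when the flag itself is not set. The URL below is hypothetical.
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	viper.SetEnvPrefix("MINIKUBE")
	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
	viper.AutomaticEnv()

	os.Setenv("MINIKUBE_ISO_URL", "https://example.com/minikube.iso")
	os.Setenv("MINIKUBE_DRY_RUN", "true")

	fmt.Println(viper.GetString("iso-url")) // https://example.com/minikube.iso
	fmt.Println(viper.GetBool("dry-run"))   // true
}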
|
||||
|
||||
// initKubernetesFlags inits the commandline flags for kubernetes related options
|
||||
func initKubernetesFlags() {
|
||||
startCmd.Flags().String(kubernetesVersion, "", fmt.Sprintf("The kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for %s, 'latest' for %s). Defaults to 'stable'.", constants.DefaultKubernetesVersion, constants.NewestKubernetesVersion))
|
||||
startCmd.Flags().Var(&config.ExtraOptions, "extra-config",
|
||||
`A set of key=value pairs that describe configuration that may be passed to different components.
|
||||
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
|
||||
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler
|
||||
Valid kubeadm parameters: `+fmt.Sprintf("%s, %s", strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmCmdParam], ", "), strings.Join(bsutil.KubeadmExtraArgsWhitelist[bsutil.KubeadmConfigParam], ",")))
|
||||
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
|
||||
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
|
||||
startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port")
|
||||
startCmd.Flags().String(apiServerName, constants.APIServerName, "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
|
||||
}
|
||||
|
||||
// initDriverFlags inits the commandline flags for vm drivers
|
||||
func initDriverFlags() {
|
||||
startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers()))
|
||||
startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.")
|
||||
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors")
|
||||
startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers")
|
||||
|
||||
// kvm2
|
||||
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)")
|
||||
startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (kvm2 driver only)")
|
||||
startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube")
|
||||
startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)")
|
||||
|
||||
// virtualbox
|
||||
startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)")
|
||||
startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)")
|
||||
startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
|
||||
startCmd.Flags().String(natNicType, "virtio", "NIC Type used for NAT network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
|
||||
|
||||
// hyperkit
|
||||
startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)")
|
||||
startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (hyperkit driver only)")
|
||||
startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock (hyperkit driver only)")
|
||||
startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (hyperkit driver only)")
|
||||
startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only)")
|
||||
|
||||
// hyperv
|
||||
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (hyperv driver only)")
|
||||
startCmd.Flags().Bool(hypervUseExternalSwitch, false, "Whether to use external switch over Default Switch if virtual switch not explicitly specified. (hyperv driver only)")
|
||||
startCmd.Flags().String(hypervExternalAdapter, "", "External Adapter on which external switch will be created if no external switch is found. (hyperv driver only)")
|
||||
}
|
||||
|
||||
// initNetworkingFlags inits the commandline flags for connectivity related flags for start
|
||||
func initNetworkingFlags() {
|
||||
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
|
||||
startCmd.Flags().StringSliceVar(®istryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
|
||||
startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
|
||||
startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.")
|
||||
startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
|
||||
startCmd.Flags().StringArrayVar(&config.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
|
||||
startCmd.Flags().StringArrayVar(&config.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
|
||||
}
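A sketch of how these connectivity flags combine on the command line; the flag spellings follow the constant names above, and the registry, mirror and proxy addresses are placeholders rather than defaults from this code:

```shell
minikube start \
  --insecure-registry=192.168.39.0/24 \
  --registry-mirror=https://mirror.example.com \
  --docker-env=HTTP_PROXY=http://proxy.example.com:3128 \
  --docker-opt=log-driver=json-file \
  --image-repository=auto \
  --image-mirror-country=cn
```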
|
||||
|
||||
// ClusterFlagValue returns the current cluster name based on flags
|
||||
func ClusterFlagValue() string {
|
||||
return viper.GetString(config.ProfileName)
|
||||
}
|
||||
|
||||
// generateClusterConfig generate a config.ClusterConfig based on flags or existing cluster config
|
||||
func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
|
||||
cc := config.ClusterConfig{}
|
||||
if existing != nil { // reuse and update the existing profile config
|
||||
cc = updateExistingConfigFromFlags(cmd, existing)
|
||||
} else {
|
||||
glog.Info("no existing cluster config was found, will generate one from the flags ")
|
||||
sysLimit, containerLimit, err := memoryLimits(drvName)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to query memory limits: %v", err)
|
||||
}
|
||||
|
||||
mem := suggestMemoryAllocation(sysLimit, containerLimit)
|
||||
if cmd.Flags().Changed(memory) {
|
||||
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
|
||||
}
|
||||
|
||||
} else {
|
||||
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
|
||||
}
|
||||
|
||||
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
|
||||
}
|
||||
|
||||
r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
|
||||
if err != nil {
|
||||
return cc, config.Node{}, errors.Wrap(err, "new runtime manager")
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(imageRepository) {
|
||||
cc.KubernetesConfig.ImageRepository = viper.GetString(imageRepository)
|
||||
}
|
||||
|
||||
// Pick good default values for --network-plugin and --enable-default-cni based on runtime.
|
||||
selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI)
|
||||
selectedNetworkPlugin := viper.GetString(networkPlugin)
|
||||
if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) {
|
||||
selectedNetworkPlugin = "cni"
|
||||
if !cmd.Flags().Changed(enableDefaultCNI) {
|
||||
selectedEnableDefaultCNI = true
|
||||
}
|
||||
}
|
||||
|
||||
repository := viper.GetString(imageRepository)
|
||||
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
|
||||
if strings.ToLower(repository) == "auto" || mirrorCountry != "" {
|
||||
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix)))
|
||||
if err != nil {
|
||||
exit.WithError("Failed to check main repository and mirrors for images for images", err)
|
||||
}
|
||||
|
||||
if !found {
|
||||
if autoSelectedRepository == "" {
|
||||
exit.WithCodeT(exit.Failure, "None of the known repositories is accessible. Consider specifying an alternative image repository with --image-repository flag")
|
||||
} else {
|
||||
out.WarningT("None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository})
|
||||
}
|
||||
}
|
||||
|
||||
repository = autoSelectedRepository
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(imageRepository) {
|
||||
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
|
||||
}
|
||||
|
||||
cc = config.ClusterConfig{
|
||||
Name: ClusterFlagValue(),
|
||||
KeepContext: viper.GetBool(keepContext),
|
||||
EmbedCerts: viper.GetBool(embedCerts),
|
||||
MinikubeISO: viper.GetString(isoURL),
|
||||
Memory: mem,
|
||||
CPUs: viper.GetInt(cpus),
|
||||
DiskSize: diskSize,
|
||||
Driver: drvName,
|
||||
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
|
||||
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
|
||||
NFSShare: viper.GetStringSlice(nfsShare),
|
||||
NFSSharesRoot: viper.GetString(nfsSharesRoot),
|
||||
DockerEnv: config.DockerEnv,
|
||||
DockerOpt: config.DockerOpt,
|
||||
InsecureRegistry: insecureRegistry,
|
||||
RegistryMirror: registryMirror,
|
||||
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
|
||||
HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch),
|
||||
HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch),
|
||||
HypervExternalAdapter: viper.GetString(hypervExternalAdapter),
|
||||
KVMNetwork: viper.GetString(kvmNetwork),
|
||||
KVMQemuURI: viper.GetString(kvmQemuURI),
|
||||
KVMGPU: viper.GetBool(kvmGPU),
|
||||
KVMHidden: viper.GetBool(kvmHidden),
|
||||
DisableDriverMounts: viper.GetBool(disableDriverMounts),
|
||||
UUID: viper.GetString(uuid),
|
||||
NoVTXCheck: viper.GetBool(noVTXCheck),
|
||||
DNSProxy: viper.GetBool(dnsProxy),
|
||||
HostDNSResolver: viper.GetBool(hostDNSResolver),
|
||||
HostOnlyNicType: viper.GetString(hostOnlyNicType),
|
||||
NatNicType: viper.GetString(natNicType),
|
||||
KubernetesConfig: config.KubernetesConfig{
|
||||
KubernetesVersion: k8sVersion,
|
||||
ClusterName: ClusterFlagValue(),
|
||||
APIServerName: viper.GetString(apiServerName),
|
||||
APIServerNames: apiServerNames,
|
||||
APIServerIPs: apiServerIPs,
|
||||
DNSDomain: viper.GetString(dnsDomain),
|
||||
FeatureGates: viper.GetString(featureGates),
|
||||
ContainerRuntime: viper.GetString(containerRuntime),
|
||||
CRISocket: viper.GetString(criSocket),
|
||||
NetworkPlugin: selectedNetworkPlugin,
|
||||
ServiceCIDR: viper.GetString(serviceCIDR),
|
||||
ImageRepository: repository,
|
||||
ExtraOptions: config.ExtraOptions,
|
||||
ShouldLoadCachedImages: viper.GetBool(cacheImages),
|
||||
EnableDefaultCNI: selectedEnableDefaultCNI,
|
||||
NodePort: viper.GetInt(apiServerPort),
|
||||
},
|
||||
}
|
||||
cc.VerifyComponents = interpretWaitFlag(*cmd)
|
||||
}
|
||||
|
||||
r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
|
||||
if err != nil {
|
||||
return cc, config.Node{}, errors.Wrap(err, "new runtime manager")
|
||||
}
|
||||
|
||||
// Feed Docker our host proxy environment by default, so that it can pull images
|
||||
// doing this for both new config and existing, in case proxy changed since previous start
|
||||
if _, ok := r.(*cruntime.Docker); ok && !cmd.Flags().Changed("docker-env") {
|
||||
setDockerProxy()
|
||||
}
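In practice this means a proxy configured on the host is forwarded to the Docker daemon inside the cluster unless the user takes over with --docker-env. A minimal sketch; the proxy addresses are placeholders, and the exact set of proxy variables read by setDockerProxy is assumed rather than shown here:

```shell
export HTTP_PROXY=http://proxy.example.com:3128
export NO_PROXY=192.168.99.0/24

# proxy settings are copied into the Docker daemon's environment
minikube start

# passing --docker-env explicitly disables the automatic forwarding
minikube start --docker-env=HTTP_PROXY=http://other-proxy.example.com:3128
```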
|
||||
|
||||
var kubeNodeName string
|
||||
if driver.BareMetal(cc.Driver) {
|
||||
kubeNodeName = "m01"
|
||||
}
|
||||
return createNode(cc, kubeNodeName)
|
||||
}
|
||||
|
||||
// updateExistingConfigFromFlags will update the existing config from the flags - used on a second start
|
||||
// it skips updating the existing docker-env, docker-opt, InsecureRegistry, registryMirror, extra-config and apiserver-ips settings
|
||||
func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to suppress "cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)"
|
||||
validateFlags(cmd, existing.Driver)
|
||||
|
||||
if cmd.Flags().Changed(containerRuntime) {
|
||||
existing.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(keepContext) {
|
||||
existing.KeepContext = viper.GetBool(keepContext)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(embedCerts) {
|
||||
existing.EmbedCerts = viper.GetBool(embedCerts)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(isoURL) {
|
||||
existing.MinikubeISO = viper.GetString(isoURL)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(memory) {
|
||||
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
|
||||
if err != nil {
|
||||
glog.Warningf("error calculate memory size in mb : %v", err)
|
||||
}
|
||||
if memInMB != existing.Memory {
|
||||
out.WarningT("You cannot change the memory size for an exiting minikube cluster. Please first delete the cluster.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(cpus) {
|
||||
if viper.GetInt(cpus) != existing.CPUs {
|
||||
out.WarningT("You cannot change the CPUs for an exiting minikube cluster. Please first delete the cluster.")
|
||||
}
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(humanReadableDiskSize) {
|
||||
memInMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
|
||||
if err != nil {
|
||||
glog.Warningf("error calculate disk size in mb : %v", err)
|
||||
}
|
||||
|
||||
if memInMB != existing.DiskSize {
|
||||
out.WarningT("You cannot change the Disk size for an exiting minikube cluster. Please first delete the cluster.")
|
||||
}
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(vpnkitSock) {
|
||||
existing.HyperkitVpnKitSock = viper.GetString(vpnkitSock)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(vsockPorts) {
|
||||
existing.HyperkitVSockPorts = viper.GetStringSlice(vsockPorts)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(nfsShare) {
|
||||
existing.NFSShare = viper.GetStringSlice(nfsShare)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(nfsSharesRoot) {
|
||||
existing.NFSSharesRoot = viper.GetString(nfsSharesRoot)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hostOnlyCIDR) {
|
||||
existing.HostOnlyCIDR = viper.GetString(hostOnlyCIDR)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hypervVirtualSwitch) {
|
||||
existing.HypervVirtualSwitch = viper.GetString(hypervVirtualSwitch)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hypervUseExternalSwitch) {
|
||||
existing.HypervUseExternalSwitch = viper.GetBool(hypervUseExternalSwitch)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hypervExternalAdapter) {
|
||||
existing.HypervExternalAdapter = viper.GetString(hypervExternalAdapter)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kvmNetwork) {
|
||||
existing.KVMNetwork = viper.GetString(kvmNetwork)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kvmQemuURI) {
|
||||
existing.KVMQemuURI = viper.GetString(kvmQemuURI)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kvmGPU) {
|
||||
existing.KVMGPU = viper.GetBool(kvmGPU)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kvmHidden) {
|
||||
existing.KVMHidden = viper.GetBool(kvmHidden)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(disableDriverMounts) {
|
||||
existing.DisableDriverMounts = viper.GetBool(disableDriverMounts)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(uuid) {
|
||||
existing.UUID = viper.GetString(uuid)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(noVTXCheck) {
|
||||
existing.NoVTXCheck = viper.GetBool(noVTXCheck)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(dnsProxy) {
|
||||
existing.DNSProxy = viper.GetBool(dnsProxy)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hostDNSResolver) {
|
||||
existing.HostDNSResolver = viper.GetBool(hostDNSResolver)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(hostOnlyNicType) {
|
||||
existing.HostOnlyNicType = viper.GetString(hostOnlyNicType)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(natNicType) {
|
||||
existing.NatNicType = viper.GetString(natNicType)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kubernetesVersion) {
|
||||
existing.KubernetesConfig.KubernetesVersion = viper.GetString(kubernetesVersion)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(apiServerName) {
|
||||
existing.KubernetesConfig.APIServerName = viper.GetString(apiServerName)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("apiserver-names") {
|
||||
existing.KubernetesConfig.APIServerNames = viper.GetStringSlice("apiserver-names")
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(apiServerPort) {
|
||||
existing.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
|
||||
}
|
||||
|
||||
// pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated.
|
||||
// in minikube config there were two fields for api server port.
|
||||
// one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port
|
||||
// this makes sure the api server port is not left set to 0
|
||||
if existing.KubernetesConfig.NodePort == 0 {
|
||||
existing.KubernetesConfig.NodePort = viper.GetInt(apiServerPort)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(dnsDomain) {
|
||||
existing.KubernetesConfig.DNSDomain = viper.GetString(dnsDomain)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(featureGates) {
|
||||
existing.KubernetesConfig.FeatureGates = viper.GetString(featureGates)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(containerRuntime) {
|
||||
existing.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(criSocket) {
|
||||
existing.KubernetesConfig.CRISocket = viper.GetString(criSocket)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(criSocket) {
|
||||
existing.KubernetesConfig.NetworkPlugin = viper.GetString(criSocket)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(networkPlugin) {
|
||||
existing.KubernetesConfig.NetworkPlugin = viper.GetString(networkPlugin)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(serviceCIDR) {
|
||||
existing.KubernetesConfig.ServiceCIDR = viper.GetString(serviceCIDR)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(cacheImages) {
|
||||
existing.KubernetesConfig.ShouldLoadCachedImages = viper.GetBool(cacheImages)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(imageRepository) {
|
||||
existing.KubernetesConfig.ImageRepository = viper.GetString(imageRepository)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(enableDefaultCNI) {
|
||||
existing.KubernetesConfig.EnableDefaultCNI = viper.GetBool(enableDefaultCNI)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(waitComponents) {
|
||||
existing.VerifyComponents = interpretWaitFlag(*cmd)
|
||||
}
|
||||
|
||||
return *existing
|
||||
}
|
||||
|
||||
// interpretWaitFlag interprets the wait flag and respects the legacy minikube users
|
||||
// returns map of components to wait for
|
||||
func interpretWaitFlag(cmd cobra.Command) map[string]bool {
|
||||
if !cmd.Flags().Changed(waitComponents) {
|
||||
glog.Infof("Wait components to verify : %+v", kverify.DefaultComponents)
|
||||
return kverify.DefaultComponents
|
||||
}
|
||||
|
||||
waitFlags, err := cmd.Flags().GetStringSlice(waitComponents)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to read --wait from flags: %v.\n Moving on will use the default wait components: %+v", err, kverify.DefaultComponents)
|
||||
return kverify.DefaultComponents
|
||||
}
|
||||
|
||||
if len(waitFlags) == 1 {
|
||||
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
|
||||
if waitFlags[0] == "false" || waitFlags[0] == "none" {
|
||||
glog.Infof("Waiting for no components: %+v", kverify.NoComponents)
|
||||
return kverify.NoComponents
|
||||
}
|
||||
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
|
||||
if waitFlags[0] == "true" || waitFlags[0] == "all" {
|
||||
glog.Infof("Waiting for all components: %+v", kverify.AllComponents)
|
||||
return kverify.AllComponents
|
||||
}
|
||||
}
|
||||
|
||||
waitComponents := kverify.NoComponents
|
||||
for _, wc := range waitFlags {
|
||||
seen := false
|
||||
for _, valid := range kverify.AllComponentsList {
|
||||
if wc == valid {
|
||||
waitComponents[wc] = true
|
||||
seen = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !seen {
|
||||
glog.Warningf("The value %q is invalid for --wait flag. valid options are %q", wc, strings.Join(kverify.AllComponentsList, ","))
|
||||
}
|
||||
}
|
||||
glog.Infof("Waiting for components: %+v", waitComponents)
|
||||
return waitComponents
|
||||
}
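Roughly, the flag accepts the legacy boolean spellings plus a comma-separated component list; the component name used below is only an example and must match an entry of kverify.AllComponentsList:

```shell
minikube start --wait=all        # legacy "true": wait for every component
minikube start --wait=none       # legacy "false": skip readiness checks
minikube start --wait=apiserver  # wait only for the listed component(s)
```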
|
|
@ -26,7 +26,7 @@ import (
|
|||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
)
|
||||
|
||||
func TestGetKuberneterVersion(t *testing.T) {
|
||||
func TestGetKubernetesVersion(t *testing.T) {
|
||||
var tests = []struct {
|
||||
description string
|
||||
expectedVersion string
|
||||
|
@ -55,6 +55,16 @@ func TestGetKuberneterVersion(t *testing.T) {
|
|||
paramVersion: "v1.16.0",
|
||||
cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}},
|
||||
},
|
||||
{
|
||||
description: "kubernetes-version given as 'stable', no config",
|
||||
expectedVersion: constants.DefaultKubernetesVersion,
|
||||
paramVersion: "stable",
|
||||
},
|
||||
{
|
||||
description: "kubernetes-version given as 'latest', no config",
|
||||
expectedVersion: constants.NewestKubernetesVersion,
|
||||
paramVersion: "latest",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -70,6 +80,50 @@ func TestGetKuberneterVersion(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMirrorCountry(t *testing.T) {
|
||||
// Set default disk size value in lieu of flag init
|
||||
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
|
||||
|
||||
k8sVersion := constants.DefaultKubernetesVersion
|
||||
var tests = []struct {
|
||||
description string
|
||||
k8sVersion string
|
||||
imageRepository string
|
||||
mirrorCountry string
|
||||
cfg *cfg.ClusterConfig
|
||||
}{
|
||||
{
|
||||
description: "image-repository none, image-mirror-country none",
|
||||
imageRepository: "",
|
||||
mirrorCountry: "",
|
||||
},
|
||||
{
|
||||
description: "image-repository auto, image-mirror-country none",
|
||||
imageRepository: "auto",
|
||||
mirrorCountry: "",
|
||||
},
|
||||
{
|
||||
description: "image-repository auto, image-mirror-country china",
|
||||
imageRepository: "auto",
|
||||
mirrorCountry: "cn",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
cmd := &cobra.Command{}
|
||||
viper.SetDefault(imageRepository, test.imageRepository)
|
||||
viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
|
||||
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("Got unexpected error %v during config generation", err)
|
||||
}
|
||||
// the resulting repository may legitimately be empty; we only verify that config generation succeeds
|
||||
_ = config.KubernetesConfig.ImageRepository
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
|
||||
// Set default disk size value in lieu of flag init
|
||||
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
|
||||
|
@ -112,7 +166,7 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
|
|||
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
|
||||
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
|
||||
}
|
||||
config, _, err := generateCfgFromFlags(cmd, k8sVersion, "none")
|
||||
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
|
||||
if err != nil {
|
||||
t.Fatalf("Got unexpected error %v during config generation", err)
|
||||
}
|
||||
|
|
|
@ -29,16 +29,14 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
)
|
||||
|
||||
var statusFormat string
|
||||
|
@ -56,24 +54,35 @@ const (
|
|||
|
||||
// Nonexistent means nonexistent
|
||||
Nonexistent = "Nonexistent" // ~state.None
|
||||
// Irrelevant is used for statuses that aren't meaningful for worker nodes
|
||||
Irrelevant = "Irrelevant"
|
||||
)
|
||||
|
||||
// Status holds string representations of component states
|
||||
type Status struct {
|
||||
Name string
|
||||
Host string
|
||||
Kubelet string
|
||||
APIServer string
|
||||
Kubeconfig string
|
||||
Worker bool
|
||||
}
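Assuming statusJSON simply marshals this struct (its implementation is not shown in this diff), the JSON output of `minikube status -o json` would carry the exported field names as keys, along the lines of:

```shell
minikube status -o json
# {"Name":"minikube","Host":"Running","Kubelet":"Running",
#  "APIServer":"Running","Kubeconfig":"Configured","Worker":false}
```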
|
||||
|
||||
const (
|
||||
minikubeNotRunningStatusFlag = 1 << 0
|
||||
clusterNotRunningStatusFlag = 1 << 1
|
||||
k8sNotRunningStatusFlag = 1 << 2
|
||||
defaultStatusFormat = `host: {{.Host}}
|
||||
defaultStatusFormat = `{{.Name}}
|
||||
host: {{.Host}}
|
||||
kubelet: {{.Kubelet}}
|
||||
apiserver: {{.APIServer}}
|
||||
kubeconfig: {{.Kubeconfig}}
|
||||
|
||||
`
|
||||
workerStatusFormat = `{{.Name}}
|
||||
host: {{.Host}}
|
||||
kubelet: {{.Kubelet}}
|
||||
|
||||
`
|
||||
)
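Since these are plain Go templates over the Status struct, --format can pick out individual fields; a small sketch:

```shell
# print only the pieces you care about
minikube status --format='host:{{.Host}} kubelet:{{.Kubelet}} apiserver:{{.APIServer}}'
```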
|
||||
|
||||
|
@ -90,47 +99,39 @@ var statusCmd = &cobra.Command{
|
|||
exit.UsageT("Cannot use both --output and --format options")
|
||||
}
|
||||
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithCodeT(exit.Unavailable, "Error getting client: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
defer api.Close()
|
||||
cname := ClusterFlagValue()
|
||||
api, cc := mustload.Partial(cname)
|
||||
|
||||
cc, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
if config.IsNotExist(err) {
|
||||
exit.WithCodeT(exitCode(&Status{}), `The "{{.name}}" cluster does not exist!`, out.V{"name": viper.GetString(config.ProfileName)})
|
||||
var st *Status
|
||||
var err error
|
||||
for _, n := range cc.Nodes {
|
||||
glog.Infof("checking status of %s ...", n.Name)
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
st, err = status(api, *cc, n)
|
||||
glog.Infof("%s status: %+v", machineName, st)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("status error: %v", err)
|
||||
}
|
||||
exit.WithError("getting config", err)
|
||||
}
|
||||
|
||||
cp, err := config.PrimaryControlPlane(cc)
|
||||
if err != nil {
|
||||
exit.WithError("getting primary control plane", err)
|
||||
}
|
||||
|
||||
machineName := driver.MachineName(*cc, cp)
|
||||
st, err := status(api, machineName)
|
||||
if err != nil {
|
||||
glog.Errorf("status error: %v", err)
|
||||
}
|
||||
if st.Host == Nonexistent {
|
||||
glog.Errorf("The %q cluster does not exist!", machineName)
|
||||
}
|
||||
|
||||
switch strings.ToLower(output) {
|
||||
case "text":
|
||||
if err := statusText(st, os.Stdout); err != nil {
|
||||
exit.WithError("status text failure", err)
|
||||
if st.Host == Nonexistent {
|
||||
glog.Errorf("The %q host does not exist!", machineName)
|
||||
}
|
||||
case "json":
|
||||
if err := statusJSON(st, os.Stdout); err != nil {
|
||||
exit.WithError("status json failure", err)
|
||||
|
||||
switch strings.ToLower(output) {
|
||||
case "text":
|
||||
if err := statusText(st, os.Stdout); err != nil {
|
||||
exit.WithError("status text failure", err)
|
||||
}
|
||||
case "json":
|
||||
if err := statusJSON(st, os.Stdout); err != nil {
|
||||
exit.WithError("status json failure", err)
|
||||
}
|
||||
default:
|
||||
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
|
||||
}
|
||||
default:
|
||||
exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
|
||||
}
|
||||
|
||||
// TODO: Update for multi-node
|
||||
os.Exit(exitCode(st))
|
||||
},
|
||||
}
|
||||
|
@ -140,21 +141,27 @@ func exitCode(st *Status) int {
|
|||
if st.Host != state.Running.String() {
|
||||
c |= minikubeNotRunningStatusFlag
|
||||
}
|
||||
if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() {
|
||||
if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
|
||||
c |= clusterNotRunningStatusFlag
|
||||
}
|
||||
if st.Kubeconfig != Configured {
|
||||
if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
|
||||
c |= k8sNotRunningStatusFlag
|
||||
}
|
||||
return c
|
||||
}
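The exit code is therefore a bitmask of the three flags defined above, which scripts can inspect directly:

```shell
minikube status
echo $?
# 0 = host, kubelet and apiserver running, kubeconfig configured
# 1 = host not running            (minikubeNotRunningStatusFlag)
# 2 = kubelet or apiserver down   (clusterNotRunningStatusFlag)
# 4 = kubeconfig misconfigured    (k8sNotRunningStatusFlag)
# 7 = all of the above, e.g. a stopped cluster with a stale kubeconfig
```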
|
||||
|
||||
func status(api libmachine.API, name string) (*Status, error) {
|
||||
func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
|
||||
|
||||
controlPlane := n.ControlPlane
|
||||
name := driver.MachineName(cc, n)
|
||||
|
||||
st := &Status{
|
||||
Name: name,
|
||||
Host: Nonexistent,
|
||||
APIServer: Nonexistent,
|
||||
Kubelet: Nonexistent,
|
||||
Kubeconfig: Nonexistent,
|
||||
Worker: !controlPlane,
|
||||
}
|
||||
|
||||
hs, err := machine.Status(api, name)
|
||||
|
@ -179,24 +186,16 @@ func status(api libmachine.API, name string) (*Status, error) {
|
|||
}
|
||||
|
||||
// We have a fully operational host, now we can check for details
|
||||
ip, err := cluster.GetHostDriverIP(api, name)
|
||||
if err != nil {
|
||||
glog.Errorln("Error host driver ip status:", err)
|
||||
st.APIServer = state.Error.String()
|
||||
if _, err := cluster.GetHostDriverIP(api, name); err != nil {
|
||||
glog.Errorf("failed to get driver ip: %v", err)
|
||||
st.Host = state.Error.String()
|
||||
return st, err
|
||||
}
|
||||
|
||||
port, err := kubeconfig.Port(name)
|
||||
if err != nil {
|
||||
glog.Warningf("unable to get port: %v", err)
|
||||
port = constants.APIServerPort
|
||||
}
|
||||
|
||||
st.Kubeconfig = Misconfigured
|
||||
ok, err := kubeconfig.IsClusterInConfig(ip, name)
|
||||
glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
|
||||
if ok {
|
||||
st.Kubeconfig = Configured
|
||||
st.Kubeconfig = Configured
|
||||
if !controlPlane {
|
||||
st.Kubeconfig = Irrelevant
|
||||
st.APIServer = Irrelevant
|
||||
}
|
||||
|
||||
host, err := machine.LoadHost(api, name)
|
||||
|
@ -209,17 +208,28 @@ func status(api libmachine.API, name string) (*Status, error) {
|
|||
return st, err
|
||||
}
|
||||
|
||||
stk, err := kverify.KubeletStatus(cr)
|
||||
glog.Infof("%s kubelet status = %s (err=%v)", name, stk, err)
|
||||
stk := kverify.KubeletStatus(cr)
|
||||
glog.Infof("%s kubelet status = %s", name, stk)
|
||||
st.Kubelet = stk.String()
|
||||
|
||||
if err != nil {
|
||||
glog.Warningf("kubelet err: %v", err)
|
||||
st.Kubelet = state.Error.String()
|
||||
} else {
|
||||
st.Kubelet = stk.String()
|
||||
// Early exit for regular nodes
|
||||
if !controlPlane {
|
||||
return st, nil
|
||||
}
|
||||
|
||||
sta, err := kverify.APIServerStatus(cr, ip, port)
|
||||
hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, host.DriverName)
|
||||
if err != nil {
|
||||
glog.Errorf("forwarded endpoint: %v", err)
|
||||
st.Kubeconfig = Misconfigured
|
||||
} else {
|
||||
err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port)
|
||||
if err != nil {
|
||||
glog.Errorf("kubeconfig endpoint: %v", err)
|
||||
st.Kubeconfig = Misconfigured
|
||||
}
|
||||
}
|
||||
|
||||
sta, err := kverify.APIServerStatus(cr, hostname, port)
|
||||
glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
|
||||
|
||||
if err != nil {
|
||||
|
@ -242,6 +252,9 @@ For the list accessible variables for the template, see the struct values here:
|
|||
|
||||
func statusText(st *Status, w io.Writer) error {
|
||||
tmpl, err := template.New("status").Parse(statusFormat)
|
||||
if st.Worker && statusFormat == defaultStatusFormat {
|
||||
tmpl, err = template.New("worker-status").Parse(workerStatusFormat)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "ok",
|
||||
state: &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured},
|
||||
want: "host: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n",
|
||||
state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured},
|
||||
want: "minikube\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n\n",
|
||||
},
|
||||
{
|
||||
name: "paused",
|
||||
state: &Status{Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured},
|
||||
want: "host: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n",
|
||||
state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured},
|
||||
want: "minikube\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\n\n",
|
||||
},
|
||||
{
|
||||
name: "down",
|
||||
state: &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured},
|
||||
want: "host: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
|
||||
state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured},
|
||||
want: "minikube\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n",
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
|
|
|
@ -24,13 +24,12 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
pkg_config "k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/util/retry"
|
||||
)
|
||||
|
@ -46,17 +45,10 @@ itself, leaving all files intact. The cluster can be started again with the "sta
|
|||
|
||||
// runStop handles the flow of "minikube stop"
|
||||
func runStop(cmd *cobra.Command, args []string) {
|
||||
profile := viper.GetString(pkg_config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
cname := ClusterFlagValue()
|
||||
|
||||
cc, err := config.Load(profile)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting cluster config", err)
|
||||
}
|
||||
api, cc := mustload.Partial(cname)
|
||||
defer api.Close()
|
||||
|
||||
for _, n := range cc.Nodes {
|
||||
nonexistent := stop(api, *cc, n)
|
||||
|
@ -67,19 +59,19 @@ func runStop(cmd *cobra.Command, args []string) {
|
|||
}
|
||||
|
||||
if err := killMountProcess(); err != nil {
|
||||
out.T(out.Warning, "Unable to kill mount process: {{.error}}", out.V{"error": err})
|
||||
out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err})
|
||||
}
|
||||
|
||||
err = kubeconfig.UnsetCurrentContext(profile, kubeconfig.PathFromEnv())
|
||||
if err != nil {
|
||||
if err := kubeconfig.UnsetCurrentContext(cname, kubeconfig.PathFromEnv()); err != nil {
|
||||
exit.WithError("update config", err)
|
||||
}
|
||||
}
|
||||
|
||||
func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool {
|
||||
nonexistent := false
|
||||
stop := func() (err error) {
|
||||
machineName := driver.MachineName(cluster, n)
|
||||
machineName := driver.MachineName(cluster, n)
|
||||
|
||||
tryStop := func() (err error) {
|
||||
err = machine.StopHost(api, machineName)
|
||||
if err == nil {
|
||||
return nil
|
||||
|
@ -88,7 +80,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
|
|||
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case mcnerror.ErrHostDoesNotExist:
|
||||
out.T(out.Meh, `"{{.profile_name}}" does not exist, nothing to stop`, out.V{"profile_name": cluster})
|
||||
out.T(out.Meh, `"{{.machineName}}" does not exist, nothing to stop`, out.V{"machineName": machineName})
|
||||
nonexistent = true
|
||||
return nil
|
||||
default:
|
||||
|
@ -96,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
|
|||
}
|
||||
}
|
||||
|
||||
if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil {
|
||||
if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil {
|
||||
exit.WithError("Unable to stop VM", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -27,13 +27,12 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/drivers/kic/oci"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/localpath"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/service"
|
||||
"k8s.io/minikube/pkg/minikube/tunnel"
|
||||
"k8s.io/minikube/pkg/minikube/tunnel/kic"
|
||||
|
@ -51,6 +50,8 @@ var tunnelCmd = &cobra.Command{
|
|||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
manager := tunnel.NewManager()
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Healthy(cname)
|
||||
|
||||
if cleanup {
|
||||
glog.Info("Checking for tunnels to cleanup...")
|
||||
|
@ -60,13 +61,6 @@ var tunnelCmd = &cobra.Command{
|
|||
return
|
||||
}
|
||||
|
||||
glog.Infof("Creating docker machine client...")
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("error creating machine client", err)
|
||||
}
|
||||
glog.Infof("Creating k8s client...")
|
||||
|
||||
// Tunnel uses the k8s clientset to query the API server for services in the LoadBalancerEmulator.
|
||||
// We define the tunnel and minikube error free if the API server responds within a second.
|
||||
// This also contributes to better UX, the tunnel status check can happen every second and
|
||||
|
@ -76,11 +70,6 @@ var tunnelCmd = &cobra.Command{
|
|||
exit.WithError("error creating clientset", err)
|
||||
}
|
||||
|
||||
cfg, err := config.Load(viper.GetString(config.ProfileName))
|
||||
if err != nil {
|
||||
exit.WithError("Error getting config", err)
|
||||
}
|
||||
|
||||
ctrlC := make(chan os.Signal, 1)
|
||||
signal.Notify(ctrlC, os.Interrupt)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -89,13 +78,13 @@ var tunnelCmd = &cobra.Command{
|
|||
cancel()
|
||||
}()
|
||||
|
||||
if runtime.GOOS == "darwin" && cfg.Driver == oci.Docker {
|
||||
port, err := oci.ForwardedPort(oci.Docker, cfg.Name, 22)
|
||||
if runtime.GOOS == "darwin" && co.Config.Driver == oci.Docker {
|
||||
port, err := oci.ForwardedPort(oci.Docker, cname, 22)
|
||||
if err != nil {
|
||||
exit.WithError("error getting ssh port", err)
|
||||
}
|
||||
sshPort := strconv.Itoa(port)
|
||||
sshKey := filepath.Join(localpath.MiniPath(), "machines", cfg.Name, "id_rsa")
|
||||
sshKey := filepath.Join(localpath.MiniPath(), "machines", cname, "id_rsa")
|
||||
|
||||
kicSSHTunnel := kic.NewSSHTunnel(ctx, sshPort, sshKey, clientset.CoreV1())
|
||||
err = kicSSHTunnel.Start()
|
||||
|
@ -106,7 +95,7 @@ var tunnelCmd = &cobra.Command{
|
|||
return
|
||||
}
|
||||
|
||||
done, err := manager.StartTunnel(ctx, cfg.Name, api, config.DefaultLoader, clientset.CoreV1())
|
||||
done, err := manager.StartTunnel(ctx, cname, co.API, config.DefaultLoader, clientset.CoreV1())
|
||||
if err != nil {
|
||||
exit.WithError("error starting tunnel", err)
|
||||
}
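Typical usage, for context: the tunnel runs in the foreground, and LoadBalancer services pick up an external IP while it is active (the service name below is hypothetical):

```shell
minikube tunnel
# in a second terminal:
kubectl get svc my-loadbalancer-svc   # EXTERNAL-IP is now populated
```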
|
||||
|
|
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
@ -25,11 +24,12 @@ import (
|
|||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/cruntime"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
@ -38,27 +38,12 @@ var unpauseCmd = &cobra.Command{
|
|||
Use: "unpause",
|
||||
Short: "unpause Kubernetes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cname := viper.GetString(config.ProfileName)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
cc, err := config.Load(cname)
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
|
||||
if err != nil && !config.IsNotExist(err) {
|
||||
exit.WithError("Error loading profile config", err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname})
|
||||
os.Exit(1)
|
||||
}
|
||||
glog.Infof("config: %+v", cc)
|
||||
|
||||
for _, n := range cc.Nodes {
|
||||
machineName := driver.MachineName(*cc, n)
|
||||
host, err := machine.LoadHost(api, machineName)
|
||||
for _, n := range co.Config.Nodes {
|
||||
machineName := driver.MachineName(*co.Config, n)
|
||||
host, err := machine.LoadHost(co.API, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting host", err)
|
||||
}
|
||||
|
@ -68,7 +53,7 @@ var unpauseCmd = &cobra.Command{
|
|||
exit.WithError("Failed to get command runner", err)
|
||||
}
|
||||
|
||||
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: r})
|
||||
if err != nil {
|
||||
exit.WithError("Failed runtime", err)
|
||||
}
|
||||
|
@ -98,6 +83,6 @@ var unpauseCmd = &cobra.Command{
|
|||
}
|
||||
|
||||
func init() {
|
||||
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to unpause")
|
||||
unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", constants.DefaultNamespaces, "namespaces to unpause")
|
||||
unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces")
|
||||
}
|
||||
|
|
|
@ -18,12 +18,9 @@ package cmd
|
|||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/pkg/minikube/cluster"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/kubeconfig"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
"k8s.io/minikube/pkg/minikube/mustload"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
@ -34,24 +31,17 @@ var updateContextCmd = &cobra.Command{
|
|||
Long: `Retrieves the IP address of the running cluster, checks it
|
||||
with IP in kubeconfig, and corrects kubeconfig if incorrect.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
exit.WithError("Error getting client", err)
|
||||
}
|
||||
defer api.Close()
|
||||
machineName := viper.GetString(config.ProfileName)
|
||||
ip, err := cluster.GetHostDriverIP(api, machineName)
|
||||
if err != nil {
|
||||
exit.WithError("Error host driver ip status", err)
|
||||
}
|
||||
updated, err := kubeconfig.UpdateIP(ip, machineName, kubeconfig.PathFromEnv())
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
|
||||
updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv())
|
||||
if err != nil {
|
||||
exit.WithError("update config", err)
|
||||
}
|
||||
if updated {
|
||||
out.T(out.Celebrate, "{{.machine}} IP has been updated to point at {{.ip}}", out.V{"machine": machineName, "ip": ip})
|
||||
out.T(out.Celebrate, `"{{.context}}" context has been updated to point to {{.hostname}}:{{.port}}`, out.V{"context": cname, "hostname": co.CP.Hostname, "port": co.CP.Port})
|
||||
} else {
|
||||
out.T(out.Meh, "{{.machine}} IP was already correctly configured for {{.ip}}", out.V{"machine": machineName, "ip": ip})
|
||||
out.T(out.Meh, `No changes required for the "{{.context}}" context`, out.V{"context": cname})
|
||||
}
|
||||
|
||||
},
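For context, the messages above surface like this in a typical run (status emoji omitted, endpoint illustrative):

```shell
minikube update-context
# "minikube" context has been updated to point to 192.168.99.101:8443
minikube update-context
# No changes required for the "minikube" context
```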
|
||||
|
|
|
@ -17,20 +17,56 @@ limitations under the License.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
"k8s.io/minikube/pkg/minikube/exit"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/version"
|
||||
)
|
||||
|
||||
var (
|
||||
versionOutput string
|
||||
shortVersion bool
|
||||
)
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version of minikube",
|
||||
Long: `Print the version of minikube.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
out.Ln("minikube version: %v", version.GetVersion())
|
||||
minikubeVersion := version.GetVersion()
|
||||
gitCommitID := version.GetGitCommitID()
|
||||
if gitCommitID != "" {
|
||||
out.Ln("commit: %v", gitCommitID)
|
||||
data := map[string]string{
|
||||
"minikubeVersion": minikubeVersion,
|
||||
"commit": gitCommitID,
|
||||
}
|
||||
switch versionOutput {
|
||||
case "":
|
||||
out.Ln("minikube version: %v", minikubeVersion)
|
||||
if !shortVersion && gitCommitID != "" {
|
||||
out.Ln("commit: %v", gitCommitID)
|
||||
}
|
||||
case "json":
|
||||
json, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
exit.WithError("version json failure", err)
|
||||
}
|
||||
out.Ln(string(json))
|
||||
case "yaml":
|
||||
yaml, err := yaml.Marshal(data)
|
||||
if err != nil {
|
||||
exit.WithError("version yaml failure", err)
|
||||
}
|
||||
out.Ln(string(yaml))
|
||||
default:
|
||||
exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
|
||||
versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
|
||||
}
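A quick sketch of the output modes wired up above; the version and commit values are placeholders:

```shell
minikube version
# minikube version: v1.9.0
# commit: abc1234

minikube version --short
# minikube version: v1.9.0

minikube version --output=json
# {"commit":"abc1234","minikubeVersion":"v1.9.0"}
```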
|
||||
|
|
|
@ -90,7 +90,7 @@ spec:
|
|||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
# WARNING: This must match pkg/minikube/bootstrapper/images/images.go
|
||||
image: kubernetesui/dashboard:v2.0.0-beta8
|
||||
image: kubernetesui/dashboard:v2.0.0-rc6
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
protocol: TCP
|
||||
|
|
|
@ -42,26 +42,20 @@ spec:
|
|||
- name: device-plugin
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/device-plugins
|
||||
- name: dev
|
||||
hostPath:
|
||||
path: /dev
|
||||
containers:
|
||||
- image: "{{default "k8s.gcr.io" .ImageRepository}}/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
|
||||
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
|
||||
- image: "nvidia/k8s-device-plugin:1.0.0-beta4"
|
||||
command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"]
|
||||
name: nvidia-gpu-device-plugin
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 10Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 10Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop: ["ALL"]
|
||||
volumeMounts:
|
||||
- name: device-plugin
|
||||
mountPath: /device-plugin
|
||||
- name: dev
|
||||
mountPath: /dev
|
||||
mountPath: /var/lib/kubelet/device-plugins
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
|
@ -46,7 +46,7 @@ spec:
|
|||
value: kube-system
|
||||
- name: TILLER_HISTORY_MAX
|
||||
value: "0"
|
||||
image: gcr.io/kubernetes-helm/tiller:v2.16.1
|
||||
image: gcr.io/kubernetes-helm/tiller:v2.16.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
|
|
|
@ -11,49 +11,24 @@ metadata:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: istiocontrolplanes.install.istio.io
|
||||
name: istiooperators.install.istio.io
|
||||
labels:
|
||||
kubernetes.io/minikube-addons: istio
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
spec:
|
||||
group: install.istio.io
|
||||
names:
|
||||
kind: IstioControlPlane
|
||||
listKind: IstioControlPlaneList
|
||||
plural: istiocontrolplanes
|
||||
singular: istiocontrolplane
|
||||
kind: IstioOperator
|
||||
listKind: IstioOperatorList
|
||||
plural: istiooperators
|
||||
singular: istiooperator
|
||||
shortNames:
|
||||
- icp
|
||||
- iop
|
||||
scope: Namespaced
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values.
|
||||
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase.
|
||||
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
spec:
|
||||
description: 'Specification of the desired state of the istio control plane resource.
|
||||
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
|
||||
type: object
|
||||
status:
|
||||
description: 'Status describes each of istio control plane component status at the current time.
|
||||
0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING.
|
||||
More info: https://github.com/istio/operator/blob/master/pkg/apis/istio/v1alpha2/v1alpha2.pb.html &
|
||||
https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
|
||||
type: object
|
||||
versions:
|
||||
- name: v1alpha2
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
...
|
||||
|
@ -243,9 +218,9 @@ spec:
|
|||
serviceAccountName: istio-operator
|
||||
containers:
|
||||
- name: istio-operator
|
||||
image: docker.io/istio/operator:1.4.0
|
||||
image: docker.io/istio/operator:1.5.0
|
||||
command:
|
||||
- istio-operator
|
||||
- operator
|
||||
- server
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
|
@ -257,7 +232,7 @@ spec:
|
|||
memory: 128Mi
|
||||
env:
|
||||
- name: WATCH_NAMESPACE
|
||||
value: ""
|
||||
value: "istio-system"
|
||||
- name: LEADER_ELECTION_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
|
|
|
@ -3,9 +3,10 @@
|
|||
|
||||
### Enable istio on minikube
|
||||
Make sure to start minikube with at least 8192 MB of memory and 4 CPUs.
|
||||
See the official [Platform Setup](https://istio.io/docs/setup/platform-setup/) documentation.
|
||||
|
||||
```shell script
|
||||
minikube start --memory=8000mb --cpus=4
|
||||
minikube start --memory=8192mb --cpus=4
|
||||
```
|
||||
|
||||
To enable this addon, simply run:
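(The command block itself falls outside this diff hunk; presumably it is the standard addon invocation:)

```shell script
minikube addons enable istio
```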
|
||||
|
|
|
@ -1,10 +1,19 @@
|
|||
apiVersion: install.istio.io/v1alpha2
|
||||
kind: IstioControlPlane
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
namespace: istio-operator
|
||||
name: istio-system
|
||||
labels:
|
||||
kubernetes.io/minikube-addons: istio
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
|
||||
---
|
||||
apiVersion: install.istio.io/v1alpha1
|
||||
kind: IstioOperator
|
||||
metadata:
|
||||
namespace: istio-system
|
||||
name: example-istiocontrolplane
|
||||
labels:
|
||||
kubernetes.io/minikube-addons: istio
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
profile: default
|
||||
|
|
|
@ -0,0 +1,149 @@
|
|||
# Minikube Registry Aliases Addon
|
||||
|
||||
An addon for minikube that helps push and pull images from the minikube registry using custom domain names. The custom domain names are made resolvable from within the cluster and on the minikube node.
|
||||
|
||||
## How to use?
|
||||
|
||||
### Start minikube
|
||||
|
||||
```shell
|
||||
minikube start -p demo
|
||||
```
|
||||
This addon depends on the `registry` addon, which needs to be enabled before the alias addon is installed:
|
||||
|
||||
### Enable internal registry
|
||||
|
||||
```shell
|
||||
minikube addons enable registry
|
||||
```
|
||||
|
||||
Verify the registry deployment:
|
||||
|
||||
```shell
|
||||
watch kubectl get pods -n kube-system
|
||||
```
|
||||
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
coredns-6955765f44-kpbzt 1/1 Running 0 16m
|
||||
coredns-6955765f44-lzlsv 1/1 Running 0 16m
|
||||
etcd-demo 1/1 Running 0 16m
|
||||
kube-apiserver-demo 1/1 Running 0 16m
|
||||
kube-controller-manager-demo 1/1 Running 0 16m
|
||||
kube-proxy-q8rb9 1/1 Running 0 16m
|
||||
kube-scheduler-demo 1/1 Running 0 16m
|
||||
*registry-4k8zs* 1/1 Running 0 40s
|
||||
registry-proxy-vs8jt 1/1 Running 0 40s
|
||||
storage-provisioner 1/1 Running 0 16m
|
||||
```
|
||||
|
||||
```shell
|
||||
kubectl get svc -n kube-system
|
||||
```
|
||||
|
||||
```shell
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 17m
|
||||
registry ClusterIP 10.97.247.75 <none> 80/TCP 94s
|
||||
```
|
||||
|
||||
>
|
||||
> **NOTE:**
|
||||
> Please make a note of the CLUSTER-IP of the `registry` service
|
||||
|
||||
### Enable registry aliases addon
|
||||
|
||||
```shell
|
||||
minikube addons enable registry-aliases
|
||||
🌟 The 'registry-aliases' addon is enabled
|
||||
```
|
||||
|
||||
You can check the minikube VM's `/etc/hosts` file for the registry alias entries:
|
||||
|
||||
```shell
|
||||
watch minikube ssh -- cat /etc/hosts
|
||||
```
|
||||
|
||||
```shell
|
||||
127.0.0.1 localhost
|
||||
127.0.1.1 demo
|
||||
10.97.247.75 example.org
|
||||
10.97.247.75 example.com
|
||||
10.97.247.75 test.com
|
||||
10.97.247.75 test.org
|
||||
```
|
||||
|
||||
The output above shows that the DaemonSet has added the `registryAliases` entries from the ConfigMap, pointing them at the internal registry's __CLUSTER-IP__.
|
||||
|
||||
### Update CoreDNS
|
||||
|
||||
CoreDNS is updated automatically by the patch-coredns Job. After a successful run, the coredns ConfigMap looks like:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
Corefile: |-
|
||||
.:53 {
|
||||
errors
|
||||
health
|
||||
rewrite name example.com registry.kube-system.svc.cluster.local
|
||||
rewrite name example.org registry.kube-system.svc.cluster.local
|
||||
rewrite name test.com registry.kube-system.svc.cluster.local
|
||||
rewrite name test.org registry.kube-system.svc.cluster.local
|
||||
kubernetes cluster.local in-addr.arpa ip6.arpa {
|
||||
pods insecure
|
||||
upstream
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
}
|
||||
prometheus :9153
|
||||
proxy . /etc/resolv.conf
|
||||
cache 30
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
```
|
||||
|
||||
To verify it run the following command:
|
||||
|
||||
```shell
|
||||
kubectl get cm -n kube-system coredns -o yaml
|
||||
```
|
||||
|
||||
Once the patch has been applied, you can push and pull from the registry using the aliases `example.com`, `example.org`, `test.com` and `test.org`.
|
||||
|
||||
A successful run shows the following extra pods (DaemonSet, Job) in the `kube-system` namespace:
|
||||
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
registry-aliases-hosts-update-995vx 1/1 Running 0 47s
|
||||
registry-aliases-patch-core-dns-zsxfc 0/1 Completed 0 47s
|
||||
```
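As a quick end-to-end sanity check of the aliases (the image name and deployment below are hypothetical), you can build and push against one of the configured domains directly:

```shell
eval $(minikube -p demo docker-env)
docker build -t example.com/demo/hello:v1 .
docker push example.com/demo/hello:v1
kubectl create deployment hello --image=example.com/demo/hello:v1
```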
|
||||
|
||||
## Verify with sample application
|
||||
|
||||
You can verify the deployment end to end using the example [application](https://github.com/kameshsampath/minikube-registry-aliases-demo).
|
||||
|
||||
```shell
|
||||
git clone https://github.com/kameshsampath/minikube-registry-aliases-demo
|
||||
cd minikube-registry-aliases-demo
|
||||
```
|
||||
|
||||
Make sure you set the docker context using `eval $(minikube -p demo docker-env)`
|
||||
|
||||
Deploy the application using [Skaffold](https://skaffold.dev):
|
||||
|
||||
```shell
|
||||
skaffold dev --port-forward
|
||||
```
|
||||
|
||||
Once the application is running, try `curl localhost:8080` to see the `Hello World` response.
|
||||
|
||||
You can also update [skaffold.yaml](./skaffold.yaml) and [app.yaml](.k8s/app.yaml) to use `test.org`, `test.com` or `example.org` as container registry URLs, and see that all the container image names resolve to the internal registry, resulting in a successful build and deployment.
|
||||
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: registry-aliases-hosts-update
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/minikube-addons: registry-aliases
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry-aliases-hosts-update
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry-aliases-hosts-update
|
||||
spec:
|
||||
initContainers:
|
||||
- name: update
|
||||
image: registry.fedoraproject.org/fedora
|
||||
volumeMounts:
|
||||
- name: etchosts
|
||||
mountPath: /host-etc/hosts
|
||||
readOnly: false
|
||||
env:
|
||||
- name: REGISTRY_ALIASES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: registry-aliases
|
||||
key: registryAliases
|
||||
command:
|
||||
- bash
|
||||
- -ce
|
||||
- |
|
||||
NL=$'\n'
|
||||
TAB=$'\t'
|
||||
HOSTS="$(cat /host-etc/hosts)"
|
||||
[ -z "$REGISTRY_SERVICE_HOST" ] && echo "Failed to get hosts entry for default registry" && exit 1;
|
||||
for H in $REGISTRY_ALIASES; do
|
||||
echo "$HOSTS" | grep "$H" || HOSTS="$HOSTS$NL$REGISTRY_SERVICE_HOST$TAB$H";
|
||||
done;
|
||||
echo "$HOSTS" | diff -u /host-etc/hosts - || echo "$HOSTS" > /host-etc/hosts
|
||||
echo "Done."
|
||||
containers:
|
||||
- name: pause-for-update
|
||||
image: gcr.io/google_containers/pause-amd64:3.1
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: etchosts
|
||||
hostPath:
|
||||
path: /etc/hosts
|
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: registry-aliases-patch-core-dns
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ttlSecondsAfterFinished: 100
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: registry-aliases-sa
|
||||
volumes:
|
||||
- name: minikube
|
||||
hostPath:
|
||||
path: /var/lib/minikube/binaries
|
||||
containers:
|
||||
- name: core-dns-patcher
|
||||
image: quay.io/rhdevelopers/core-dns-patcher
|
||||
imagePullPolicy: IfNotPresent
|
||||
# using the kubectl from the minikube instance
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/minikube/binaries
|
||||
name: minikube
|
||||
readOnly: true
|
||||
restartPolicy: Never
|
||||
backoffLimit: 4
|
|
@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: registry-aliases
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/minikube-addons: registry-aliases
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
data:
|
||||
# Add additional hosts separated by new-line
|
||||
registryAliases: >-
|
||||
example.org
|
||||
example.com
|
||||
test.com
|
||||
test.org
|
||||
# default registry address in minikube when enabled via minikube addons enable registry
|
||||
registrySvc: registry.kube-system.svc.cluster.local
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: registry-aliases-crb
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: registry-aliases-sa
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: registry-aliases-sa
|
||||
namespace: kube-system
|
|
@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y

@ -25,10 +26,10 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_SMP=y

@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_BRIDGE=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_IPSET=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_BPF=m

@ -380,6 +383,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_HW_RANDOM_INTEL is not set
# CONFIG_HW_RANDOM_AMD is not set
CONFIG_NVRAM=y

@ -504,3 +508,5 @@ CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
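These kernel options (netem, ematch, virtio-rng, transparent hugepages, among others) only take effect in a freshly built ISO. A hedged way to confirm that a given option made it into the running guest kernel, assuming the ISO kernel exposes /proc/config.gz (i.e. CONFIG_IKCONFIG_PROC is enabled, which this hunk does not show):

    # assumption: /proc/config.gz is available in the guest kernel
    minikube ssh -- "zcat /proc/config.gz | grep CONFIG_NET_SCH_NETEM"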
@ -11,3 +11,4 @@ sha256 70d4c746fe207422c78420dc4239768f485eea639a38c993c02872ec6305dd1d v1.15.2.
sha256 05f9614c4d5970b4662499b84c270b0ab953596ee863dcd09c9dc7a2d2f09789 v1.16.0.tar.gz
sha256 57e1ee990ef2d5af8b32c33a21b4998682608e3556dcf1d3349666f55e7d95b9 v1.16.1.tar.gz
sha256 23a797762e4544ee7c171ef138cfc1141a3f0acc2838d9965c2a58e53b16c3ae v1.17.0.tar.gz
sha256 7967e9218fdfb59d6005a9e19c1668469bc5566c2a35927cffe7de8656bb22c7 v1.17.1.tar.gz

@ -4,8 +4,8 @@
#
################################################################################

CRIO_BIN_VERSION = v1.17.0
CRIO_BIN_COMMIT = 6d0ffae63b9b7d8f07e7f9cf50736a67fb31faf3
CRIO_BIN_VERSION = v1.17.1
CRIO_BIN_COMMIT = ee2de87bd8e2a7a84799476cb4fc4ce8a78fdf6d
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme
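Buildroot verifies downloads against the .hash file, so the new v1.17.1 entry has to match the tarball that CRIO_BIN_SITE serves. A quick way to reproduce the checksum locally, with the URL derived from CRIO_BIN_SITE and CRIO_BIN_SOURCE above (the same check applies to the falco, sysdig and podman bumps below):

    # recompute the sha256 that buildroot will verify for the v1.17.1 bump
    curl -sL https://github.com/cri-o/cri-o/archive/v1.17.1.tar.gz | sha256sum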
@ -1,5 +1,7 @@
# falco
sha256 87c60273c35d544256e471b403497be33f24df662673338236ec92ba3fc1f8b7 0.19.0.tar.gz
sha256 b873e3590e56ead740ed905108221f98da6100da3c5b7acf2355ea1cf628d931 0.20.0.tar.gz
sha256 b1c9884855d58be94a97b2e348bcdc7db995800f0405b0f4e9a7176ee2f094a7 0.21.0.tar.gz
# sysdig
sha256 6e477ac5fe9d3110b870bd4495f01541373a008c375a1934a2d1c46798b6bad6 146a431edf95829ac11bfd9c85ba3ef08789bffe.tar.gz
sha256 1c69363e4c36cdaeed413c2ef557af53bfc4bf1109fbcb6d6e18dc40fe6ddec8 be1ea2d9482d0e6e2cb14a0fd7e08cbecf517f94.tar.gz

@ -4,7 +4,7 @@
#
########################################################################

FALCO_PROBE_VERSION = 0.20.0
FALCO_PROBE_VERSION = 0.21.0
FALCO_PROBE_SITE = https://github.com/falcosecurity/falco/archive
FALCO_PROBE_SOURCE = $(FALCO_PROBE_VERSION).tar.gz
FALCO_PROBE_DEPENDENCIES += ncurses libyaml

@ -12,7 +12,7 @@ FALCO_PROBE_LICENSE = Apache-2.0
FALCO_PROBE_LICENSE_FILES = COPYING

# see cmake/modules/sysdig-repo/CMakeLists.txt
FALCO_PROBE_SYSDIG_VERSION = 146a431edf95829ac11bfd9c85ba3ef08789bffe
FALCO_PROBE_SYSDIG_VERSION = be1ea2d9482d0e6e2cb14a0fd7e08cbecf517f94
FALCO_PROBE_EXTRA_DOWNLOADS = https://github.com/draios/sysdig/archive/${FALCO_PROBE_SYSDIG_VERSION}.tar.gz

define FALCO_PROBE_SYSDIG_SRC
@ -12,3 +12,4 @@ sha256 2e027c1b935f3a03f27ef7f17823ccf334607a17d033d4ce53a90b98294e7f68 v1.4.4.t
sha256 61b44b739c485125f179044f7aa7dc58c820f771bce4ce495fa555a38dc68b57 v1.6.3.tar.gz
sha256 6e59821320b435543bc7554e73faa66d5956e4ad3f7e7f4ea03bebd6726758e9 v1.6.4.tar.gz
sha256 50960293c2019e38ce69e4cf5f0a683e7fea1562b180e38e38c9355fcd7c4f0d v1.6.5.tar.gz
sha256 69f7ff81da1510ebf2962c1de3170675ca3cd8a24bc00c93742a24bcce17c752 v1.8.2.tar.gz

@ -1,5 +1,5 @@
PODMAN_VERSION = v1.6.5
PODMAN_COMMIT = 45e7be192ef99e870c59a1cd2c1fa7940b0af2d6
PODMAN_VERSION = v1.8.2
PODMAN_COMMIT = 028e3317eb1494b9b2acba4a0a295df80fae66cc
PODMAN_SITE = https://github.com/containers/libpod/archive
PODMAN_SOURCE = $(PODMAN_VERSION).tar.gz
PODMAN_LICENSE = Apache-2.0
@ -26,7 +26,6 @@ import (
	"testing"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
	"k8s.io/minikube/pkg/minikube/constants"
	"k8s.io/minikube/pkg/minikube/notify"
	"k8s.io/minikube/pkg/util"
)
@ -1,4 +1,28 @@
[
  {
    "name": "v1.9.2",
    "checksums": {
      "darwin": "f27016246850b3145e1509e98f7ed060fd9575ac4d455c7bdc15277734372e85",
      "linux": "3121f933bf8d608befb24628a045ce536658738c14618504ba46c92e656ea6b5",
      "windows": "426586f33d88a484fdc5a3b326b0651d57860e9305a4f9d4180640e3beccaf6b"
    }
  },
  {
    "name": "v1.9.1",
    "checksums": {
      "darwin": "ac8855ea54e798fa6f00e8c251b55c3d2a54e3b80e896162958a5ac7b0e3f60b",
      "linux": "7174c881289a7302a05d477c67cc1ef5b48153e825089d6c0d0bcfaebe33d42a",
      "windows": "91d15b2ef8f357aa463ae16de59f6e018120398f492ba4e35cd77f21acb27d5c"
    }
  },
  {
    "name": "v1.9.0",
    "checksums": {
      "darwin": "2a074b0d842e3d9272444990374c6ffc51878c2d11c0434f54e15269b59593f9",
      "linux": "81d77d1babe63be393e0a3204aac7825eb35e0fdf58ffefd9f66508a43864866",
      "windows": "d11a957704c23670eac453a47897449a2aaab13b7dcd6424307f8932ac9f81bb"
    }
  },
  {
    "name": "v1.8.2",
    "checksums": {
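The checksums published in this file are what update tooling compares downloaded binaries against. A hedged end-to-end check for the new v1.9.2 Linux binary, assuming the usual release bucket URL layout (the URL is not part of this diff):

    # assumption: releases are served from the standard GCS bucket path
    curl -sLO https://storage.googleapis.com/minikube/releases/v1.9.2/minikube-linux-amd64
    echo "3121f933bf8d608befb24628a045ce536658738c14618504ba46c92e656ea6b5  minikube-linux-amd64" | sha256sum -c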
@ -1,49 +0,0 @@
# Advanced Topics and Tutorials

## Cluster Configuration

* **Alternative Runtimes** ([alternative_runtimes.md](alternative_runtimes.md)): How to run minikube without Docker as the container runtime

* **Environment Variables** ([env_vars.md](env_vars.md)): The different environment variables that minikube understands

* **Minikube Addons** ([addons.md](addons.md)): Information on configuring addons to be run on minikube

* **Configuring Kubernetes** ([configuring_kubernetes.md](configuring_kubernetes.md)): Configuring different Kubernetes components in minikube

* **Caching Images** ([cache.md](cache.md)): Caching non-minikube images in minikube

* **GPUs** ([gpu.md](gpu.md)): Using NVIDIA GPUs on minikube

* **OpenID Connect Authentication** ([openid_connect_auth.md](openid_connect_auth.md)): Using OIDC Authentication on minikube

### Installation and debugging

* **Driver installation** ([drivers.md](drivers.md)): In-depth instructions for installing the various hypervisor drivers

* **Debugging minikube** ([debugging.md](debugging.md)): General practices for debugging the minikube binary itself

### Developing on the minikube cluster

* **Reusing the Docker Daemon** ([reusing_the_docker_daemon.md](reusing_the_docker_daemon.md)): How to point your docker CLI to the docker daemon running inside minikube

* **Building images within the VM** ([building_images_within_the_vm.md](building_images_within_the_vm.md)): How to build a container image within the minikube VM

#### Storage

* **Persistent Volumes** ([persistent_volumes.md](persistent_volumes.md)): Persistent Volumes in Minikube and persisted locations in the VM

* **Host Folder Mounting** ([host_folder_mount.md](host_folder_mount.md)): How to mount your files from your host into the minikube VM

* **Syncing files into the VM** ([syncing-files.md](syncing-files.md)): How to sync files from your host into the minikube VM

#### Networking

* **HTTP Proxy** ([http_proxy.md](http_proxy.md)): Instructions on how to run minikube behind an HTTP proxy

* **Insecure or Private Registries** ([insecure_registry.md](insecure_registry.md)): How to use private or insecure registries with minikube

* **Accessing etcd from inside the cluster** ([accessing_etcd.md](accessing_etcd.md))

* **Networking** ([networking.md](networking.md)): FAQ about networking between the host and minikube VM

* **Offline** ([offline.md](offline.md)): Details about using minikube offline
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/accessing-host-resources/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/addons/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/runtimes/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/building_within/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/caching
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/commands/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/addons/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/drivers/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/building/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/iso/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/concepts/principles/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/releasing/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/contributing/roadmap/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/dashboard/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/debug/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/drivers/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/environment_variables
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/mount/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/proxy/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/registry/
@ -1,2 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/networking/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/disk_cache/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tutorials/openid_connect_auth/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/reference/persistent_volumes/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/docker_daemon/
@ -1 +0,0 @@
This document has moved to https://minikube.sigs.k8s.io/docs/tasks/sync/
Some files were not shown because too many files have changed in this diff.